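/*
 *	ICMP for IPv4: construction and transmission of ICMP messages
 *	(with rate limiting of error replies) and dispatch of received
 *	ICMP types to their handlers.
 */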
#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <net/checksum.h>
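/*
 *	Transmit assembly block: everything needed to build one outgoing
 *	ICMP message - the triggering skb, the ICMP header plus optional
 *	timestamp words, and any echoed IP options.
 */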
struct icmp_bxm {
	struct sk_buff *skb;
	int offset;
	int data_len;

	struct {
		struct icmphdr icmph;
		__be32 times[3];
	} data;
	int head_len;
	struct ip_options replyopts;
	unsigned char optbuf[40];
};
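/* ICMP and per-message-type ICMP statistics (SNMP MIBs). */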
DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics) __read_mostly;
DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics) __read_mostly;
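/*
 *	Map ICMP_DEST_UNREACH codes to the errno reported to the socket
 *	layer; .fatal marks codes that transports treat as hard errors.
 */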
struct icmp_err icmp_err_convert[] = {
	{
		.errno = ENETUNREACH,
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,
		.fatal = 0,
	},
	{
		.errno = ENOPROTOOPT,
		.fatal = 1,
	},
	{
		.errno = ECONNREFUSED,
		.fatal = 1,
	},
	{
		.errno = EMSGSIZE,
		.fatal = 0,
	},
	{
		.errno = EOPNOTSUPP,
		.fatal = 0,
	},
	{
		.errno = ENETUNREACH,
		.fatal = 1,
	},
	{
		.errno = EHOSTDOWN,
		.fatal = 1,
	},
	{
		.errno = ENONET,
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
};
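/*
 *	sysctl tunables:
 *	- icmp_echo_ignore_all / icmp_echo_ignore_broadcasts suppress
 *	  echo (and broadcast timestamp) replies,
 *	- icmp_ignore_bogus_error_responses silences warnings about
 *	  errors sent to broadcast addresses,
 *	- icmp_ratelimit / icmp_ratemask rate limit the types whose bits
 *	  are set in the mask (default 0x1818: dest unreach, source
 *	  quench, time exceeded, parameter problem),
 *	- icmp_errors_use_inbound_ifaddr picks the source address used
 *	  for errors about forwarded packets.
 */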
int sysctl_icmp_echo_ignore_all __read_mostly;
int sysctl_icmp_echo_ignore_broadcasts __read_mostly = 1;

int sysctl_icmp_ignore_bogus_error_responses __read_mostly = 1;

int sysctl_icmp_ratelimit __read_mostly = 1 * HZ;
int sysctl_icmp_ratemask __read_mostly = 0x1818;
int sysctl_icmp_errors_use_inbound_ifaddr __read_mostly;
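/*
 *	Per-type dispatch used by icmp_rcv(); 'error' marks the type as
 *	an ICMP error message.
 */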
struct icmp_control {
	void (*handler)(struct sk_buff *skb);
	short error;
};

static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
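/*
 *	The ICMP socket(s): one raw socket per CPU, used for all locally
 *	generated ICMP output.  Transmission trylocks the socket with
 *	bottom halves disabled; if the lock is already held on this CPU
 *	the message is simply not sent.
 */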
static DEFINE_PER_CPU(struct socket *, __icmp_socket) = NULL;
#define icmp_socket	__get_cpu_var(__icmp_socket)

static __inline__ int icmp_xmit_lock(void)
{
	local_bh_disable();

	if (unlikely(!spin_trylock(&icmp_socket->sk->sk_lock.slock))) {
		/* The socket is already locked on this CPU (e.g. an ICMP
		 * error triggered from the output path while we hold it);
		 * give up rather than deadlock. */
		local_bh_enable();
		return 1;
	}
	return 0;
}

static void icmp_xmit_unlock(void)
{
	spin_unlock_bh(&icmp_socket->sk->sk_lock.slock);
}
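/*
 *	Token-bucket rate limiter attached to a destination cache entry:
 *	tokens accrue with elapsed jiffies, are capped at
 *	XRLIM_BURST_FACTOR * timeout, and each allowed transmission
 *	costs 'timeout' tokens.  Returns nonzero if sending is allowed.
 */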
#define XRLIM_BURST_FACTOR	6
int xrlim_allow(struct dst_entry *dst, int timeout)
{
	unsigned long now;
	int rc = 0;

	now = jiffies;
	dst->rate_tokens += now - dst->rate_last;
	dst->rate_last = now;
	if (dst->rate_tokens > XRLIM_BURST_FACTOR * timeout)
		dst->rate_tokens = XRLIM_BURST_FACTOR * timeout;
	if (dst->rate_tokens >= timeout) {
		dst->rate_tokens -= timeout;
		rc = 1;
	}
	return rc;
}
static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
{
	struct dst_entry *dst = &rt->u.dst;
	int rc = 1;

	if (type > NR_ICMP_TYPES)
		goto out;

	/* Don't limit PMTU discovery. */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		goto out;

	/* No rate limit on loopback. */
	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
		goto out;

	/* Limit only if this ICMP type is enabled in the ratemask. */
	if ((1 << type) & sysctl_icmp_ratemask)
		rc = xrlim_allow(dst, sysctl_icmp_ratelimit);
out:
	return rc;
}
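/* Bump the per-type and aggregate ICMP output counters. */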
void icmp_out_count(unsigned char type)
{
	ICMPMSGOUT_INC_STATS(type);
	ICMP_INC_STATS(ICMP_MIB_OUTMSGS);
}
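/*
 *	Copy-and-checksum callback for ip_append_data(): copies part of
 *	the offending packet into the new frame, folds the partial
 *	checksum into skb->csum and, for error messages, attaches the
 *	original packet's conntrack reference.
 */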
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
			  struct sk_buff *skb)
{
	struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(icmp_param->skb,
				      icmp_param->offset + offset,
				      to, len, 0);

	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (icmp_pointers[icmp_param->data.icmph.type].error)
		nf_ct_attach(skb, icmp_param->skb);
	return 0;
}
static void icmp_push_reply(struct icmp_bxm *icmp_param,
			    struct ipcm_cookie *ipc, struct rtable *rt)
{
	struct sk_buff *skb;

	if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
			   icmp_param->data_len+icmp_param->head_len,
			   icmp_param->head_len,
			   ipc, rt, MSG_DONTWAIT) < 0)
		ip_flush_pending_frames(icmp_socket->sk);
	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
		struct icmphdr *icmph = icmp_hdr(skb);
		__wsum csum = 0;
		struct sk_buff *skb1;

		/* Sum the data checksums of all queued fragments, then
		 * copy in the ICMP header and fold the final checksum. */
		skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) {
			csum = csum_add(csum, skb1->csum);
		}
		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
						 (char *)icmph,
						 icmp_param->head_len, csum);
		icmph->checksum = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(icmp_socket->sk);
	}
}
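/*
 *	Send a reply (echo, timestamp, address mask) back to the sender
 *	of 'skb', echoing its IP options and honouring the rate limit.
 */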
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct sock *sk = icmp_socket->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
	struct rtable *rt = (struct rtable *)skb->dst;
	__be32 daddr;

	if (ip_options_echo(&icmp_param->replyopts, skb))
		return;

	if (icmp_xmit_lock())
		return;

	icmp_param->data.icmph.checksum = 0;

	inet->tos = ip_hdr(skb)->tos;
	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	if (icmp_param->replyopts.optlen) {
		ipc.opt = &icmp_param->replyopts;
		if (ipc.opt->srr)
			daddr = icmp_param->replyopts.faddr;
	}
	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    .proto = IPPROTO_ICMP };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			goto out_unlock;
	}
	if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type,
			       icmp_param->data.icmph.code))
		icmp_push_reply(icmp_param, &ipc, rt);
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock();
}
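/*
 *	Send an ICMP error message in response to a packet.
 *
 *	In line with RFC 1122, no error is generated about another ICMP
 *	error, about packets addressed to broadcast/multicast, or about
 *	non-initial fragments, and all errors are rate limited.  'info'
 *	carries the type-specific field (gateway, MTU or pointer).
 */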
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
	struct iphdr *iph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = (struct rtable *)skb_in->dst;
	struct ipcm_cookie ipc;
	__be32 saddr;
	u8 tos;

	if (!rt)
		goto out;

	/*
	 *	Find the original header. It is expected to be valid, of course.
	 */
	iph = ip_hdr(skb_in);

	if ((u8 *)iph < skb_in->head ||
	    (skb_in->network_header + sizeof(*iph)) > skb_in->tail)
		goto out;

	/*
	 *	No replies to physical multicast/broadcast.
	 */
	if (skb_in->pkt_type != PACKET_HOST)
		goto out;

	/*
	 *	Now check at the protocol level.
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto out;

	/*
	 *	Only reply to the first fragment.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		goto out;

	/*
	 *	If we send an ICMP error to an ICMP error a mess would result.
	 */
	if (icmp_pointers[type].error) {
		/*
		 *	We are an error: check whether we are replying to an
		 *	ICMP error.
		 */
		if (iph->protocol == IPPROTO_ICMP) {
			u8 _inner_type, *itp;

			itp = skb_header_pointer(skb_in,
						 skb_network_header(skb_in) +
						 (iph->ihl << 2) +
						 offsetof(struct icmphdr,
							  type) -
						 skb_in->data,
						 sizeof(_inner_type),
						 &_inner_type);
			if (itp == NULL)
				goto out;

			/*
			 *	Assume any unknown ICMP type is an error.
			 */
			if (*itp > NR_ICMP_TYPES ||
			    icmp_pointers[*itp].error)
				goto out;
		}
	}
	if (icmp_xmit_lock())
		return;

	/*
	 *	Construct source address and options.
	 */
	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL)) {
		struct net_device *dev = NULL;

		if (rt->fl.iif && sysctl_icmp_errors_use_inbound_ifaddr)
			dev = dev_get_by_index(&init_net, rt->fl.iif);

		if (dev) {
			saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
			dev_put(dev);
		} else
			saddr = 0;
	}

	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
					   IPTOS_PREC_INTERNETCONTROL) :
					  iph->tos;

	if (ip_options_echo(&icmp_param.replyopts, skb_in))
		goto out_unlock;

	/*
	 *	Prepare data for ICMP header.
	 */
	icmp_param.data.icmph.type = type;
	icmp_param.data.icmph.code = code;
	icmp_param.data.icmph.un.gateway = info;
	icmp_param.data.icmph.checksum = 0;
	icmp_param.skb = skb_in;
	icmp_param.offset = skb_network_offset(skb_in);
	inet_sk(icmp_socket->sk)->tos = tos;
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts;

	{
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = icmp_param.replyopts.srr ?
						icmp_param.replyopts.faddr :
						iph->saddr,
					.saddr = saddr,
					.tos = RT_TOS(tos)
				}
			},
			.proto = IPPROTO_ICMP,
			.uli_u = {
				.icmpt = {
					.type = type,
					.code = code
				}
			}
		};
		security_skb_classify_flow(skb_in, &fl);
		if (ip_route_output_key(&rt, &fl))
			goto out_unlock;
	}

	if (!icmpv4_xrlim_allow(rt, type, code))
		goto ende;

	/* RFC says return as much as we can without exceeding 576 bytes. */

	room = dst_mtu(&rt->u.dst);
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
	room -= sizeof(struct icmphdr);

	icmp_param.data_len = skb_in->len - icmp_param.offset;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	icmp_param.head_len = sizeof(struct icmphdr);

	icmp_push_reply(&icmp_param, &ipc, rt);
ende:
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock();
out:;
}
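/*
 *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_SOURCE_QUENCH
 *	and ICMP_PARAMETERPROB: sanity check the embedded header, then
 *	pass the error to matching raw sockets and to the transport
 *	protocol's error handler.
 */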
static void icmp_unreach(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct icmphdr *icmph;
	int hash, protocol;
	struct net_protocol *ipprot;
	struct sock *raw_sk;
	u32 info = 0;

	/*
	 *	Incomplete header?  Only the embedded IP header is checked
	 *	here; longer headers are validated by the upper layers.
	 */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out_err;

	icmph = icmp_hdr(skb);
	iph = (struct iphdr *)skb->data;

	if (iph->ihl < 5)	/* Mangled header, drop. */
		goto out_err;

	if (icmph->type == ICMP_DEST_UNREACH) {
		switch (icmph->code & 15) {
		case ICMP_NET_UNREACH:
		case ICMP_HOST_UNREACH:
		case ICMP_PROT_UNREACH:
		case ICMP_PORT_UNREACH:
			break;
		case ICMP_FRAG_NEEDED:
			if (ipv4_config.no_pmtu_disc) {
				LIMIT_NETDEBUG(KERN_INFO "ICMP: %u.%u.%u.%u: "
							 "fragmentation needed "
							 "and DF set.\n",
					       NIPQUAD(iph->daddr));
			} else {
				info = ip_rt_frag_needed(iph,
						ntohs(icmph->un.frag.mtu));
				if (!info)
					goto out;
			}
			break;
		case ICMP_SR_FAILED:
			LIMIT_NETDEBUG(KERN_INFO "ICMP: %u.%u.%u.%u: Source "
						 "Route Failed.\n",
				       NIPQUAD(iph->daddr));
			break;
		default:
			break;
		}
		if (icmph->code > NR_ICMP_UNREACH)
			goto out;
	} else if (icmph->type == ICMP_PARAMETERPROB)
		info = ntohl(icmph->un.gateway) >> 24;

	/*
	 *	Check the other end isn't violating RFC 1122.  Some routers
	 *	send bogus responses to broadcast frames.  If you see this
	 *	message, first check that your netmasks match at both ends;
	 *	if they do, get the other vendor to fix their kit.
	 */
	if (!sysctl_icmp_ignore_bogus_error_responses &&
	    inet_addr_type(iph->daddr) == RTN_BROADCAST) {
		if (net_ratelimit())
			printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP "
					    "type %u, code %u "
					    "error to a broadcast: %u.%u.%u.%u on %s\n",
			       NIPQUAD(ip_hdr(skb)->saddr),
			       icmph->type, icmph->code,
			       NIPQUAD(iph->daddr),
			       skb->dev->name);
		goto out;
	}

	/*
	 *	Check the full embedded IP header plus 8 bytes of protocol
	 *	data, so protocol error handlers need no extra validation.
	 */
	if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
		goto out;

	iph = (struct iphdr *)skb->data;
	protocol = iph->protocol;

	/*
	 *	Deliver the error to any matching raw sockets first ...
	 */
	hash = protocol & (MAX_INET_PROTOS - 1);
	read_lock(&raw_v4_lock);
	if ((raw_sk = sk_head(&raw_v4_htable[hash])) != NULL) {
		while ((raw_sk = __raw_v4_lookup(raw_sk, protocol, iph->daddr,
						 iph->saddr,
						 skb->dev->ifindex)) != NULL) {
			raw_err(raw_sk, skb, info);
			raw_sk = sk_next(raw_sk);
			iph = (struct iphdr *)skb->data;
		}
	}
	read_unlock(&raw_v4_lock);

	/* ... then to the registered transport protocol's error handler. */
	rcu_read_lock();
	ipprot = rcu_dereference(inet_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, info);
	rcu_read_unlock();

out:
	return;
out_err:
	ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
	goto out;
}
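/*
 *	Handle ICMP_REDIRECT: hand the new gateway to the routing cache.
 */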
static void icmp_redirect(struct sk_buff *skb)
{
	struct iphdr *iph;

	if (skb->len < sizeof(struct iphdr))
		goto out_err;

	/*
	 *	Get the copied header of the packet that caused the redirect.
	 */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	iph = (struct iphdr *)skb->data;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
		/*
		 *	Network redirects are handled as host redirects,
		 *	hence the fall through.
		 */
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
			       icmp_hdr(skb)->un.gateway,
			       iph->saddr, skb->dev);
		break;
	}
out:
	return;
out_err:
	ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
	goto out;
}
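/*
 *	Handle ICMP_ECHO ("ping") requests: echo the payload back as
 *	ICMP_ECHOREPLY unless icmp_echo_ignore_all is set.
 */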
static void icmp_echo(struct sk_buff *skb)
{
	if (!sysctl_icmp_echo_ignore_all) {
		struct icmp_bxm icmp_param;

		icmp_param.data.icmph = *icmp_hdr(skb);
		icmp_param.data.icmph.type = ICMP_ECHOREPLY;
		icmp_param.skb = skb;
		icmp_param.offset = 0;
		icmp_param.data_len = skb->len;
		icmp_param.head_len = sizeof(struct icmphdr);
		icmp_reply(&icmp_param, skb);
	}
}
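/*
 *	Handle ICMP timestamp requests: reply with receive and transmit
 *	timestamps in milliseconds since midnight UT, as RFC 792 specifies.
 */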
static void icmp_timestamp(struct sk_buff *skb)
{
	struct timeval tv;
	struct icmp_bxm icmp_param;

	/*
	 *	Too short.
	 */
	if (skb->len < 4)
		goto out_err;

	/*
	 *	Fill in the current time as milliseconds since midnight UT.
	 */
	do_gettimeofday(&tv);
	icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * 1000 +
					 tv.tv_usec / 1000);
	icmp_param.data.times[2] = icmp_param.data.times[1];
	if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
		BUG();
	icmp_param.data.icmph = *icmp_hdr(skb);
	icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
	icmp_param.data.icmph.code = 0;
	icmp_param.skb = skb;
	icmp_param.offset = 0;
	icmp_param.data_len = 0;
	icmp_param.head_len = sizeof(struct icmphdr) + 12;
	icmp_reply(&icmp_param, skb);
out:
	return;
out_err:
	ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
	goto out;
}
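/*
 *	ICMP_ADDRESS ("address mask request") handler: requests are left
 *	unanswered; only the disabled debug printk below remains.
 */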
static void icmp_address(struct sk_buff *skb)
{
#if 0
	if (net_ratelimit())
		printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
#endif
}
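/*
 *	Check incoming address mask replies against our interface masks
 *	and log mismatches (only on forwarding interfaces with martian
 *	logging enabled).
 */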
static void icmp_address_reply(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
		goto out;

	in_dev = in_dev_get(dev);
	if (!in_dev)
		goto out;
	rcu_read_lock();
	if (in_dev->ifa_list &&
	    IN_DEV_LOG_MARTIANS(in_dev) &&
	    IN_DEV_FORWARD(in_dev)) {
		__be32 _mask, *mp;

		mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
		BUG_ON(mp == NULL);
		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
			if (*mp == ifa->ifa_mask &&
			    inet_ifa_match(rt->rt_src, ifa))
				break;
		}
		if (!ifa && net_ratelimit()) {
			printk(KERN_INFO "Wrong address mask %u.%u.%u.%u from "
					 "%s/%u.%u.%u.%u\n",
			       NIPQUAD(*mp), dev->name, NIPQUAD(rt->rt_src));
		}
	}
	rcu_read_unlock();
	in_dev_put(in_dev);
out:;
}
static void icmp_discard(struct sk_buff *skb)
{
}
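/*
 *	Deal with incoming ICMP packets: verify the checksum, apply the
 *	broadcast/multicast rules and dispatch through icmp_pointers[].
 */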
int icmp_rcv(struct sk_buff *skb)
{
	struct icmphdr *icmph;
	struct rtable *rt = (struct rtable *)skb->dst;

	ICMP_INC_STATS_BH(ICMP_MIB_INMSGS);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = 0;
		if (__skb_checksum_complete(skb))
			goto error;
	}

	if (!pskb_pull(skb, sizeof(struct icmphdr)))
		goto error;

	icmph = icmp_hdr(skb);

	ICMPMSGIN_INC_STATS_BH(icmph->type);

	/*
	 *	NR_ICMP_TYPES is the highest 'known' ICMP type; RFC 1122
	 *	requires unknown types to be silently discarded.
	 */
	if (icmph->type > NR_ICMP_TYPES)
		goto error;

	/*
	 *	Parse the ICMP message.
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		/*
		 *	RFC 1122 allows echo and timestamp requests sent to a
		 *	broadcast/multicast address to be silently ignored;
		 *	the sysctl leaves that to the administrator.  Beyond
		 *	those, only address mask messages are accepted on
		 *	broadcast/multicast at all.
		 */
		if ((icmph->type == ICMP_ECHO ||
		     icmph->type == ICMP_TIMESTAMP) &&
		    sysctl_icmp_echo_ignore_broadcasts) {
			goto error;
		}
		if (icmph->type != ICMP_ECHO &&
		    icmph->type != ICMP_TIMESTAMP &&
		    icmph->type != ICMP_ADDRESS &&
		    icmph->type != ICMP_ADDRESSREPLY) {
			goto error;
		}
	}

	icmp_pointers[icmph->type].handler(skb);

drop:
	kfree_skb(skb);
	return 0;
error:
	ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
	goto drop;
}
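/*
 *	This table defines how each ICMP type is handled and whether it
 *	is classed as an error message (making it subject to rate
 *	limiting and to the no-error-about-an-error rule above).
 */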
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
	[ICMP_ECHOREPLY] = {
		.handler = icmp_discard,
	},
	[1] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[2] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_DEST_UNREACH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_SOURCE_QUENCH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_REDIRECT] = {
		.handler = icmp_redirect,
		.error = 1,
	},
	[6] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[7] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_ECHO] = {
		.handler = icmp_echo,
	},
	[9] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[10] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_TIME_EXCEEDED] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_PARAMETERPROB] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_TIMESTAMP] = {
		.handler = icmp_timestamp,
	},
	[ICMP_TIMESTAMPREPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REQUEST] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESS] = {
		.handler = icmp_address,
	},
	[ICMP_ADDRESSREPLY] = {
		.handler = icmp_address_reply,
	},
};
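/*
 *	Create the per-CPU ICMP sockets at boot: raw IPPROTO_ICMP
 *	sockets with atomic allocation, a send buffer large enough for
 *	two 64K messages, the default unicast TTL, PMTU discovery
 *	disabled, and unhashed so the input path never delivers to them.
 */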
void __init icmp_init(struct net_proto_family *ops)
{
	struct inet_sock *inet;
	int i;

	for_each_possible_cpu(i) {
		int err;

		err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
				       &per_cpu(__icmp_socket, i));

		if (err < 0)
			panic("Failed to create the ICMP control socket.\n");

		per_cpu(__icmp_socket, i)->sk->sk_allocation = GFP_ATOMIC;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead. */
		per_cpu(__icmp_socket, i)->sk->sk_sndbuf =
			(2 * ((64 * 1024) + sizeof(struct sk_buff)));

		inet = inet_sk(per_cpu(__icmp_socket, i)->sk);
		inet->uc_ttl = -1;
		inet->pmtudisc = IP_PMTUDISC_DONT;

		/* Unhash the socket so that the IP input path never
		 * matches it for incoming packets. */
		per_cpu(__icmp_socket, i)->sk->sk_prot->unhash(per_cpu(__icmp_socket, i)->sk);
	}
}
EXPORT_SYMBOL(icmp_err_convert);
EXPORT_SYMBOL(icmp_send);
EXPORT_SYMBOL(icmp_statistics);
EXPORT_SYMBOL(xrlim_allow);