linux/net/ipv4/icmp.c
   1/*
   2 *      NET3:   Implementation of the ICMP protocol layer.
   3 *
   4 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
   5 *
   6 *      This program is free software; you can redistribute it and/or
   7 *      modify it under the terms of the GNU General Public License
   8 *      as published by the Free Software Foundation; either version
   9 *      2 of the License, or (at your option) any later version.
  10 *
  11 *      Some of the function names and the icmp unreach table for this
  12 *      module were derived from [icmp.c 1.0.11 06/02/93] by
  13 *      Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
  14 *      Other than that this module is a complete rewrite.
  15 *
  16 *      Fixes:
  17 *      Clemens Fruhwirth       :       introduce global icmp rate limiting
  18 *                                      with icmp type masking ability instead
  19 *                                      of broken per type icmp timeouts.
  20 *              Mike Shaver     :       RFC1122 checks.
  21 *              Alan Cox        :       Multicast ping reply as self.
  22 *              Alan Cox        :       Fix atomicity lockup in ip_build_xmit
  23 *                                      call.
  24 *              Alan Cox        :       Added 216,128 byte paths to the MTU
  25 *                                      code.
  26 *              Martin Mares    :       RFC1812 checks.
  27 *              Martin Mares    :       Can be configured to follow redirects
  28 *                                      if acting as a router _without_ a
  29 *                                      routing protocol (RFC 1812).
  30 *              Martin Mares    :       Echo requests may be configured to
  31 *                                      be ignored (RFC 1812).
  32 *              Martin Mares    :       Limitation of ICMP error message
  33 *                                      transmit rate (RFC 1812).
  34 *              Martin Mares    :       TOS and Precedence set correctly
  35 *                                      (RFC 1812).
  36 *              Martin Mares    :       Now copying as much data from the
  37 *                                      original packet as we can without
  38 *                                      exceeding 576 bytes (RFC 1812).
  39 *      Willy Konynenberg       :       Transparent proxying support.
  40 *              Keith Owens     :       RFC1191 correction for 4.2BSD based
  41 *                                      path MTU bug.
  42 *              Thomas Quinot   :       ICMP Dest Unreach codes up to 15 are
  43 *                                      valid (RFC 1812).
  44 *              Andi Kleen      :       Check all packet lengths properly
  45 *                                      and moved all kfree_skb() up to
  46 *                                      icmp_rcv.
  47 *              Andi Kleen      :       Move the rate limit bookkeeping
  48 *                                      into the dest entry and use a token
  49 *                                      bucket filter (thanks to ANK). Make
  50 *                                      the rates sysctl configurable.
  51 *              Yu Tianli       :       Fixed two ugly bugs in icmp_send
  52 *                                      - IP option length was accounted wrongly
  53 *                                      - ICMP header length was not accounted
  54 *                                        at all.
  55 *              Tristan Greaves :       Added sysctl option to ignore bogus
  56 *                                      broadcast responses from broken routers.
  57 *
  58 * To Fix:
  59 *
  60 *      - Should use skb_pull() instead of all the manual checking.
   61 *        This would also greatly simplify some upper layer error handlers. --AK
  62 *
  63 */
  64
  65#include <linux/module.h>
  66#include <linux/types.h>
  67#include <linux/jiffies.h>
  68#include <linux/kernel.h>
  69#include <linux/fcntl.h>
  70#include <linux/socket.h>
  71#include <linux/in.h>
  72#include <linux/inet.h>
  73#include <linux/inetdevice.h>
  74#include <linux/netdevice.h>
  75#include <linux/string.h>
  76#include <linux/netfilter_ipv4.h>
  77#include <linux/slab.h>
  78#include <net/snmp.h>
  79#include <net/ip.h>
  80#include <net/route.h>
  81#include <net/protocol.h>
  82#include <net/icmp.h>
  83#include <net/tcp.h>
  84#include <net/udp.h>
  85#include <net/raw.h>
  86#include <linux/skbuff.h>
  87#include <net/sock.h>
  88#include <linux/errno.h>
  89#include <linux/timer.h>
  90#include <linux/init.h>
  91#include <asm/system.h>
  92#include <asm/uaccess.h>
  93#include <net/checksum.h>
  94#include <net/xfrm.h>
  95#include <net/inet_common.h>
  96
  97/*
  98 *      Build xmit assembly blocks
  99 */
 100
 101struct icmp_bxm {
 102        struct sk_buff *skb;
 103        int offset;
 104        int data_len;
 105
 106        struct {
 107                struct icmphdr icmph;
 108                __be32         times[3];
 109        } data;
 110        int head_len;
 111        struct ip_options replyopts;
 112        unsigned char  optbuf[40];
 113};
 114
 115/* An array of errno for error messages from dest unreach. */
 116/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
 117
 118const struct icmp_err icmp_err_convert[] = {
 119        {
 120                .errno = ENETUNREACH,   /* ICMP_NET_UNREACH */
 121                .fatal = 0,
 122        },
 123        {
 124                .errno = EHOSTUNREACH,  /* ICMP_HOST_UNREACH */
 125                .fatal = 0,
 126        },
 127        {
 128                .errno = ENOPROTOOPT    /* ICMP_PROT_UNREACH */,
 129                .fatal = 1,
 130        },
 131        {
 132                .errno = ECONNREFUSED,  /* ICMP_PORT_UNREACH */
 133                .fatal = 1,
 134        },
 135        {
 136                .errno = EMSGSIZE,      /* ICMP_FRAG_NEEDED */
 137                .fatal = 0,
 138        },
 139        {
 140                .errno = EOPNOTSUPP,    /* ICMP_SR_FAILED */
 141                .fatal = 0,
 142        },
 143        {
 144                .errno = ENETUNREACH,   /* ICMP_NET_UNKNOWN */
 145                .fatal = 1,
 146        },
 147        {
 148                .errno = EHOSTDOWN,     /* ICMP_HOST_UNKNOWN */
 149                .fatal = 1,
 150        },
 151        {
 152                .errno = ENONET,        /* ICMP_HOST_ISOLATED */
 153                .fatal = 1,
 154        },
 155        {
 156                .errno = ENETUNREACH,   /* ICMP_NET_ANO */
 157                .fatal = 1,
 158        },
 159        {
 160                .errno = EHOSTUNREACH,  /* ICMP_HOST_ANO */
 161                .fatal = 1,
 162        },
 163        {
 164                .errno = ENETUNREACH,   /* ICMP_NET_UNR_TOS */
 165                .fatal = 0,
 166        },
 167        {
 168                .errno = EHOSTUNREACH,  /* ICMP_HOST_UNR_TOS */
 169                .fatal = 0,
 170        },
 171        {
 172                .errno = EHOSTUNREACH,  /* ICMP_PKT_FILTERED */
 173                .fatal = 1,
 174        },
 175        {
 176                .errno = EHOSTUNREACH,  /* ICMP_PREC_VIOLATION */
 177                .fatal = 1,
 178        },
 179        {
 180                .errno = EHOSTUNREACH,  /* ICMP_PREC_CUTOFF */
 181                .fatal = 1,
 182        },
 183};
 184EXPORT_SYMBOL(icmp_err_convert);
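
/*
 * Illustrative sketch (not part of the original file): a transport
 * protocol's err_handler would typically consult icmp_err_convert[]
 * to map an ICMP_DEST_UNREACH code to an errno, treating non-fatal
 * entries as transient per RFC 1122, 3.2.2.1.  The handler body below
 * is hypothetical:
 *
 *	if (type == ICMP_DEST_UNREACH && code <= NR_ICMP_UNREACH) {
 *		int err = icmp_err_convert[code].errno;
 *
 *		if (icmp_err_convert[code].fatal)
 *			sk->sk_err = err;		(hard error)
 *		else
 *			sk->sk_err_soft = err;		(transient)
 *	}
 */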
 185
 186/*
 187 *      ICMP control array. This specifies what to do with each ICMP.
 188 */
 189
 190struct icmp_control {
 191        void (*handler)(struct sk_buff *skb);
 192        short   error;          /* This ICMP is classed as an error message */
 193};
 194
 195static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
 196
 197/*
 198 *      The ICMP socket(s). This is the most convenient way to flow control
 199 *      our ICMP output as well as maintain a clean interface throughout
 200 *      all layers. All Socketless IP sends will soon be gone.
 201 *
 202 *      On SMP we have one ICMP socket per-cpu.
 203 */
 204static struct sock *icmp_sk(struct net *net)
 205{
 206        return net->ipv4.icmp_sk[smp_processor_id()];
 207}
 208
 209static inline struct sock *icmp_xmit_lock(struct net *net)
 210{
 211        struct sock *sk;
 212
 213        local_bh_disable();
 214
 215        sk = icmp_sk(net);
 216
 217        if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 218                /* This can happen if the output path signals a
 219                 * dst_link_failure() for an outgoing ICMP packet.
 220                 */
 221                local_bh_enable();
 222                return NULL;
 223        }
 224        return sk;
 225}
 226
 227static inline void icmp_xmit_unlock(struct sock *sk)
 228{
 229        spin_unlock_bh(&sk->sk_lock.slock);
 230}
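
/*
 * Usage sketch: icmp_reply() and icmp_send() below bracket their
 * transmit work with this pair, and simply give up if the per-cpu
 * socket is already locked on this CPU (recursion through
 * dst_link_failure() on the output path):
 *
 *	sk = icmp_xmit_lock(net);
 *	if (sk == NULL)
 *		return;
 *	... build and push the ICMP message ...
 *	icmp_xmit_unlock(sk);
 */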
 231
 232/*
 233 *      Send an ICMP frame.
 234 */
 235
 236/*
 237 *      Check transmit rate limitation for given message.
 238 *      The rate information is held in the destination cache now.
 239 *      This function is generic and could be used for other purposes
 240 *      too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
 241 *
 242 *      Note that the same dst_entry fields are modified by functions in
 243 *      route.c too, but these work for packet destinations while xrlim_allow
 244 *      works for icmp destinations. This means the rate limiting information
 245 *      for one "ip object" is shared - and these ICMPs are twice limited:
 246 *      by source and by destination.
 247 *
 248 *      RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 249 *                        SHOULD allow setting of rate limits
 250 *
 251 *      Shared between ICMPv4 and ICMPv6.
 252 */
 253#define XRLIM_BURST_FACTOR 6
 254int xrlim_allow(struct dst_entry *dst, int timeout)
 255{
 256        unsigned long now, token = dst->rate_tokens;
 257        int rc = 0;
 258
 259        now = jiffies;
 260        token += now - dst->rate_last;
 261        dst->rate_last = now;
 262        if (token > XRLIM_BURST_FACTOR * timeout)
 263                token = XRLIM_BURST_FACTOR * timeout;
 264        if (token >= timeout) {
 265                token -= timeout;
 266                rc = 1;
 267        }
 268        dst->rate_tokens = token;
 269        return rc;
 270}
 271EXPORT_SYMBOL(xrlim_allow);
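
/*
 * Worked example (assuming the default sysctl_icmp_ratelimit of 1*HZ
 * set in icmp_sk_init() below): an idle destination holds the cap of
 * XRLIM_BURST_FACTOR * timeout = 6*HZ tokens, so a burst of six ICMPs
 * passes back to back; afterwards one token accrues per jiffy, i.e.
 * roughly one ICMP per second is allowed:
 *
 *	token = min(token + (now - rate_last), 6 * HZ);
 *	if (token >= HZ) {
 *		token -= HZ;
 *		allow = 1;
 *	}
 */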
 272
 273static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 274                int type, int code)
 275{
 276        struct dst_entry *dst = &rt->dst;
 277        int rc = 1;
 278
 279        if (type > NR_ICMP_TYPES)
 280                goto out;
 281
 282        /* Don't limit PMTU discovery. */
 283        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
 284                goto out;
 285
 286        /* No rate limit on loopback */
 287        if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
 288                goto out;
 289
 290        /* Limit if icmp type is enabled in ratemask. */
 291        if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
 292                rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
 293out:
 294        return rc;
 295}
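
/*
 * Example: with the default net->ipv4.sysctl_icmp_ratemask of 0x1818
 * (set in icmp_sk_init() below), only destination unreachable, source
 * quench, time exceeded and parameter problem are rate limited:
 *
 *	(1 << ICMP_DEST_UNREACH)  |	   1 <<  3 = 0x0008
 *	(1 << ICMP_SOURCE_QUENCH) |	   1 <<  4 = 0x0010
 *	(1 << ICMP_TIME_EXCEEDED) |	   1 << 11 = 0x0800
 *	(1 << ICMP_PARAMETERPROB)	   1 << 12 = 0x1000
 *					   total   = 0x1818
 */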
 296
 297/*
 298 *      Maintain the counters used in the SNMP statistics for outgoing ICMP
 299 */
 300void icmp_out_count(struct net *net, unsigned char type)
 301{
 302        ICMPMSGOUT_INC_STATS(net, type);
 303        ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
 304}
 305
 306/*
 307 *      Checksum each fragment, and on the first include the headers and final
 308 *      checksum.
 309 */
 310static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
 311                          struct sk_buff *skb)
 312{
 313        struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
 314        __wsum csum;
 315
 316        csum = skb_copy_and_csum_bits(icmp_param->skb,
 317                                      icmp_param->offset + offset,
 318                                      to, len, 0);
 319
 320        skb->csum = csum_block_add(skb->csum, csum, odd);
 321        if (icmp_pointers[icmp_param->data.icmph.type].error)
 322                nf_ct_attach(skb, icmp_param->skb);
 323        return 0;
 324}
 325
 326static void icmp_push_reply(struct icmp_bxm *icmp_param,
 327                            struct ipcm_cookie *ipc, struct rtable **rt)
 328{
 329        struct sock *sk;
 330        struct sk_buff *skb;
 331
 332        sk = icmp_sk(dev_net((*rt)->dst.dev));
 333        if (ip_append_data(sk, icmp_glue_bits, icmp_param,
 334                           icmp_param->data_len+icmp_param->head_len,
 335                           icmp_param->head_len,
 336                           ipc, rt, MSG_DONTWAIT) < 0) {
 337                ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
 338                ip_flush_pending_frames(sk);
 339        } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
 340                struct icmphdr *icmph = icmp_hdr(skb);
 341                __wsum csum = 0;
 342                struct sk_buff *skb1;
 343
 344                skb_queue_walk(&sk->sk_write_queue, skb1) {
 345                        csum = csum_add(csum, skb1->csum);
 346                }
 347                csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
 348                                                 (char *)icmph,
 349                                                 icmp_param->head_len, csum);
 350                icmph->checksum = csum_fold(csum);
 351                skb->ip_summed = CHECKSUM_NONE;
 352                ip_push_pending_frames(sk);
 353        }
 354}
 355
 356/*
 357 *      Driving logic for building and sending ICMP messages.
 358 */
 359
 360static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 361{
 362        struct ipcm_cookie ipc;
 363        struct rtable *rt = skb_rtable(skb);
 364        struct net *net = dev_net(rt->dst.dev);
 365        struct sock *sk;
 366        struct inet_sock *inet;
 367        __be32 daddr;
 368
 369        if (ip_options_echo(&icmp_param->replyopts, skb))
 370                return;
 371
 372        sk = icmp_xmit_lock(net);
 373        if (sk == NULL)
 374                return;
 375        inet = inet_sk(sk);
 376
 377        icmp_param->data.icmph.checksum = 0;
 378
 379        inet->tos = ip_hdr(skb)->tos;
 380        daddr = ipc.addr = rt->rt_src;
 381        ipc.opt = NULL;
 382        ipc.tx_flags = 0;
 383        if (icmp_param->replyopts.optlen) {
 384                ipc.opt = &icmp_param->replyopts;
 385                if (ipc.opt->srr)
 386                        daddr = icmp_param->replyopts.faddr;
 387        }
 388        {
 389                struct flowi fl = { .fl4_dst= daddr,
 390                                    .fl4_src = rt->rt_spec_dst,
 391                                    .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
 392                                    .proto = IPPROTO_ICMP };
 393                security_skb_classify_flow(skb, &fl);
 394                if (ip_route_output_key(net, &rt, &fl))
 395                        goto out_unlock;
 396        }
 397        if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
 398                               icmp_param->data.icmph.code))
 399                icmp_push_reply(icmp_param, &ipc, &rt);
 400        ip_rt_put(rt);
 401out_unlock:
 402        icmp_xmit_unlock(sk);
 403}
 404
 405
 406/*
 407 *      Send an ICMP message in response to a situation
 408 *
 409 *      RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header.
 410 *                MAY send more (we do).
 411 *                      MUST NOT change this header information.
 412 *                      MUST NOT reply to a multicast/broadcast IP address.
 413 *                      MUST NOT reply to a multicast/broadcast MAC address.
 414 *                      MUST reply to only the first fragment.
 415 */
 416
 417void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 418{
 419        struct iphdr *iph;
 420        int room;
 421        struct icmp_bxm icmp_param;
 422        struct rtable *rt = skb_rtable(skb_in);
 423        struct ipcm_cookie ipc;
 424        __be32 saddr;
 425        u8  tos;
 426        struct net *net;
 427        struct sock *sk;
 428
 429        if (!rt)
 430                goto out;
 431        net = dev_net(rt->dst.dev);
 432
 433        /*
 434         *      Find the original header. It is expected to be valid, of course.
  435         *      Check this, because icmp_send is sometimes called from
  436         *      the most obscure devices.
 437         */
 438        iph = ip_hdr(skb_in);
 439
 440        if ((u8 *)iph < skb_in->head ||
 441            (skb_in->network_header + sizeof(*iph)) > skb_in->tail)
 442                goto out;
 443
 444        /*
 445         *      No replies to physical multicast/broadcast
 446         */
 447        if (skb_in->pkt_type != PACKET_HOST)
 448                goto out;
 449
 450        /*
 451         *      Now check at the protocol level
 452         */
 453        if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
 454                goto out;
 455
 456        /*
 457         *      Only reply to fragment 0. We byte re-order the constant
 458         *      mask for efficiency.
 459         */
 460        if (iph->frag_off & htons(IP_OFFSET))
 461                goto out;
 462
 463        /*
 464         *      If we send an ICMP error to an ICMP error a mess would result..
 465         */
 466        if (icmp_pointers[type].error) {
 467                /*
 468                 *      We are an error, check if we are replying to an
 469                 *      ICMP error
 470                 */
 471                if (iph->protocol == IPPROTO_ICMP) {
 472                        u8 _inner_type, *itp;
 473
 474                        itp = skb_header_pointer(skb_in,
 475                                                 skb_network_header(skb_in) +
 476                                                 (iph->ihl << 2) +
 477                                                 offsetof(struct icmphdr,
 478                                                          type) -
 479                                                 skb_in->data,
 480                                                 sizeof(_inner_type),
 481                                                 &_inner_type);
 482                        if (itp == NULL)
 483                                goto out;
 484
 485                        /*
 486                         *      Assume any unknown ICMP type is an error. This
 487                         *      isn't specified by the RFC, but think about it..
 488                         */
 489                        if (*itp > NR_ICMP_TYPES ||
 490                            icmp_pointers[*itp].error)
 491                                goto out;
 492                }
 493        }
 494
 495        sk = icmp_xmit_lock(net);
 496        if (sk == NULL)
 497                return;
 498
 499        /*
 500         *      Construct source address and options.
 501         */
 502
 503        saddr = iph->daddr;
 504        if (!(rt->rt_flags & RTCF_LOCAL)) {
 505                struct net_device *dev = NULL;
 506
 507                rcu_read_lock();
 508                if (rt_is_input_route(rt) &&
 509                    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
 510                        dev = dev_get_by_index_rcu(net, rt->fl.iif);
 511
 512                if (dev)
 513                        saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
 514                else
 515                        saddr = 0;
 516                rcu_read_unlock();
 517        }
 518
 519        tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
 520                                           IPTOS_PREC_INTERNETCONTROL) :
 521                                          iph->tos;
 522
 523        if (ip_options_echo(&icmp_param.replyopts, skb_in))
 524                goto out_unlock;
 525
 526
 527        /*
 528         *      Prepare data for ICMP header.
 529         */
 530
 531        icmp_param.data.icmph.type       = type;
 532        icmp_param.data.icmph.code       = code;
 533        icmp_param.data.icmph.un.gateway = info;
 534        icmp_param.data.icmph.checksum   = 0;
 535        icmp_param.skb    = skb_in;
 536        icmp_param.offset = skb_network_offset(skb_in);
 537        inet_sk(sk)->tos = tos;
 538        ipc.addr = iph->saddr;
 539        ipc.opt = &icmp_param.replyopts;
 540        ipc.tx_flags = 0;
 541
 542        {
 543                struct flowi fl = {
 544                        .fl4_dst = icmp_param.replyopts.srr ?
 545                                   icmp_param.replyopts.faddr : iph->saddr,
 546                        .fl4_src = saddr,
 547                        .fl4_tos = RT_TOS(tos),
 548                        .proto = IPPROTO_ICMP,
 549                        .fl_icmp_type = type,
 550                        .fl_icmp_code = code,
 551                };
 552                int err;
 553                struct rtable *rt2;
 554
 555                security_skb_classify_flow(skb_in, &fl);
 556                if (__ip_route_output_key(net, &rt, &fl))
 557                        goto out_unlock;
 558
 559                /* No need to clone since we're just using its address. */
 560                rt2 = rt;
 561
 562                if (!fl.nl_u.ip4_u.saddr)
 563                        fl.nl_u.ip4_u.saddr = rt->rt_src;
 564
 565                err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
 566                switch (err) {
 567                case 0:
 568                        if (rt != rt2)
 569                                goto route_done;
 570                        break;
 571                case -EPERM:
 572                        rt = NULL;
 573                        break;
 574                default:
 575                        goto out_unlock;
 576                }
 577
 578                if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
 579                        goto relookup_failed;
 580
 581                if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
 582                        err = __ip_route_output_key(net, &rt2, &fl);
 583                else {
 584                        struct flowi fl2 = {};
 585                        unsigned long orefdst;
 586
 587                        fl2.fl4_dst = fl.fl4_src;
 588                        if (ip_route_output_key(net, &rt2, &fl2))
 589                                goto relookup_failed;
 590
 591                        /* Ugh! */
 592                        orefdst = skb_in->_skb_refdst; /* save old refdst */
 593                        err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
 594                                             RT_TOS(tos), rt2->dst.dev);
 595
 596                        dst_release(&rt2->dst);
 597                        rt2 = skb_rtable(skb_in);
 598                        skb_in->_skb_refdst = orefdst; /* restore old refdst */
 599                }
 600
 601                if (err)
 602                        goto relookup_failed;
 603
 604                err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
 605                                  XFRM_LOOKUP_ICMP);
 606                switch (err) {
 607                case 0:
 608                        dst_release(&rt->dst);
 609                        rt = rt2;
 610                        break;
 611                case -EPERM:
 612                        goto ende;
 613                default:
 614relookup_failed:
 615                        if (!rt)
 616                                goto out_unlock;
 617                        break;
 618                }
 619        }
 620
 621route_done:
 622        if (!icmpv4_xrlim_allow(net, rt, type, code))
 623                goto ende;
 624
 625        /* RFC says return as much as we can without exceeding 576 bytes. */
 626
 627        room = dst_mtu(&rt->dst);
 628        if (room > 576)
 629                room = 576;
 630        room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
 631        room -= sizeof(struct icmphdr);
 632
 633        icmp_param.data_len = skb_in->len - icmp_param.offset;
 634        if (icmp_param.data_len > room)
 635                icmp_param.data_len = room;
 636        icmp_param.head_len = sizeof(struct icmphdr);
 637
 638        icmp_push_reply(&icmp_param, &ipc, &rt);
 639ende:
 640        ip_rt_put(rt);
 641out_unlock:
 642        icmp_xmit_unlock(sk);
 643out:;
 644}
 645EXPORT_SYMBOL(icmp_send);
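
/*
 * Usage sketch (calls live elsewhere in the IPv4 stack, shown here for
 * illustration): forwarding and fragmentation report undeliverable
 * packets through icmp_send(), for example
 *
 *	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
 *
 * when the TTL runs out while forwarding, and
 *
 *	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 *
 * when DF is set but the packet exceeds the path MTU (mtu here stands
 * for the outgoing route's MTU).
 */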
 646
 647
 648/*
 649 *      Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH.
 650 */
 651
 652static void icmp_unreach(struct sk_buff *skb)
 653{
 654        struct iphdr *iph;
 655        struct icmphdr *icmph;
 656        int hash, protocol;
 657        const struct net_protocol *ipprot;
 658        u32 info = 0;
 659        struct net *net;
 660
 661        net = dev_net(skb_dst(skb)->dev);
 662
 663        /*
  664         *      Incomplete header?
  665         *      Only the IP header is checked here; upper layers should
  666         *      perform an additional check for longer headers.
 667         */
 668
 669        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 670                goto out_err;
 671
 672        icmph = icmp_hdr(skb);
 673        iph   = (struct iphdr *)skb->data;
 674
 675        if (iph->ihl < 5) /* Mangled header, drop. */
 676                goto out_err;
 677
 678        if (icmph->type == ICMP_DEST_UNREACH) {
 679                switch (icmph->code & 15) {
 680                case ICMP_NET_UNREACH:
 681                case ICMP_HOST_UNREACH:
 682                case ICMP_PROT_UNREACH:
 683                case ICMP_PORT_UNREACH:
 684                        break;
 685                case ICMP_FRAG_NEEDED:
 686                        if (ipv4_config.no_pmtu_disc) {
 687                                LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n",
 688                                               &iph->daddr);
 689                        } else {
 690                                info = ip_rt_frag_needed(net, iph,
 691                                                         ntohs(icmph->un.frag.mtu),
 692                                                         skb->dev);
 693                                if (!info)
 694                                        goto out;
 695                        }
 696                        break;
 697                case ICMP_SR_FAILED:
 698                        LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n",
 699                                       &iph->daddr);
 700                        break;
 701                default:
 702                        break;
 703                }
 704                if (icmph->code > NR_ICMP_UNREACH)
 705                        goto out;
 706        } else if (icmph->type == ICMP_PARAMETERPROB)
 707                info = ntohl(icmph->un.gateway) >> 24;
 708
 709        /*
 710         *      Throw it at our lower layers
 711         *
 712         *      RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
 713         *                header.
 714         *      RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
 715         *                transport layer.
 716         *      RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
 717         *                transport layer.
 718         */
 719
 720        /*
  721 *      Check the other end isn't violating RFC 1122. Some routers send
 722         *      bogus responses to broadcast frames. If you see this message
 723         *      first check your netmask matches at both ends, if it does then
 724         *      get the other vendor to fix their kit.
 725         */
 726
 727        if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
 728            inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
 729                if (net_ratelimit())
 730                        printk(KERN_WARNING "%pI4 sent an invalid ICMP "
 731                                            "type %u, code %u "
 732                                            "error to a broadcast: %pI4 on %s\n",
 733                               &ip_hdr(skb)->saddr,
 734                               icmph->type, icmph->code,
 735                               &iph->daddr,
 736                               skb->dev->name);
 737                goto out;
 738        }
 739
  740        /* Pull in the full IP header plus 8 bytes of protocol data to
  741         * avoid additional checks in the protocol handlers.
 742         */
 743        if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
 744                goto out;
 745
 746        iph = (struct iphdr *)skb->data;
 747        protocol = iph->protocol;
 748
 749        /*
 750         *      Deliver ICMP message to raw sockets. Pretty useless feature?
 751         */
 752        raw_icmp_error(skb, protocol, info);
 753
 754        hash = protocol & (MAX_INET_PROTOS - 1);
 755        rcu_read_lock();
 756        ipprot = rcu_dereference(inet_protos[hash]);
 757        if (ipprot && ipprot->err_handler)
 758                ipprot->err_handler(skb, info);
 759        rcu_read_unlock();
 760
 761out:
 762        return;
 763out_err:
 764        ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 765        goto out;
 766}
 767
 768
 769/*
 770 *      Handle ICMP_REDIRECT.
 771 */
 772
 773static void icmp_redirect(struct sk_buff *skb)
 774{
 775        struct iphdr *iph;
 776
 777        if (skb->len < sizeof(struct iphdr))
 778                goto out_err;
 779
 780        /*
 781         *      Get the copied header of the packet that caused the redirect
 782         */
 783        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 784                goto out;
 785
 786        iph = (struct iphdr *)skb->data;
 787
 788        switch (icmp_hdr(skb)->code & 7) {
 789        case ICMP_REDIR_NET:
 790        case ICMP_REDIR_NETTOS:
 791                /*
 792                 * As per RFC recommendations now handle it as a host redirect.
 793                 */
 794        case ICMP_REDIR_HOST:
 795        case ICMP_REDIR_HOSTTOS:
 796                ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
 797                               icmp_hdr(skb)->un.gateway,
 798                               iph->saddr, skb->dev);
 799                break;
 800        }
 801out:
 802        return;
 803out_err:
 804        ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
 805        goto out;
 806}
 807
 808/*
 809 *      Handle ICMP_ECHO ("ping") requests.
 810 *
 811 *      RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
 812 *                requests.
 813 *      RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
 814 *                included in the reply.
 815 *      RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
 816 *                echo requests, MUST have default=NOT.
 817 *      See also WRT handling of options once they are done and working.
 818 */
 819
 820static void icmp_echo(struct sk_buff *skb)
 821{
 822        struct net *net;
 823
 824        net = dev_net(skb_dst(skb)->dev);
 825        if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
 826                struct icmp_bxm icmp_param;
 827
 828                icmp_param.data.icmph      = *icmp_hdr(skb);
 829                icmp_param.data.icmph.type = ICMP_ECHOREPLY;
 830                icmp_param.skb             = skb;
 831                icmp_param.offset          = 0;
 832                icmp_param.data_len        = skb->len;
 833                icmp_param.head_len        = sizeof(struct icmphdr);
 834                icmp_reply(&icmp_param, skb);
 835        }
 836}
 837
 838/*
 839 *      Handle ICMP Timestamp requests.
 840 *      RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
 841 *                SHOULD be in the kernel for minimum random latency.
 842 *                MUST be accurate to a few minutes.
 843 *                MUST be updated at least at 15Hz.
 844 */
 845static void icmp_timestamp(struct sk_buff *skb)
 846{
 847        struct timespec tv;
 848        struct icmp_bxm icmp_param;
 849        /*
 850         *      Too short.
 851         */
 852        if (skb->len < 4)
 853                goto out_err;
 854
 855        /*
 856         *      Fill in the current time as ms since midnight UT:
 857         */
 858        getnstimeofday(&tv);
 859        icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
 860                                         tv.tv_nsec / NSEC_PER_MSEC);
 861        icmp_param.data.times[2] = icmp_param.data.times[1];
 862        if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
 863                BUG();
 864        icmp_param.data.icmph      = *icmp_hdr(skb);
 865        icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
 866        icmp_param.data.icmph.code = 0;
 867        icmp_param.skb             = skb;
 868        icmp_param.offset          = 0;
 869        icmp_param.data_len        = 0;
 870        icmp_param.head_len        = sizeof(struct icmphdr) + 12;
 871        icmp_reply(&icmp_param, skb);
 872out:
 873        return;
 874out_err:
 875        ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
 876        goto out;
 877}
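
/*
 * Worked example: for a request arriving at 12:00:00.500 UT,
 * tv.tv_sec % 86400 == 43200 and tv.tv_nsec / NSEC_PER_MSEC == 500,
 * so the receive and transmit timestamps above are both set to
 *
 *	htonl(43200 * MSEC_PER_SEC + 500)
 *
 * i.e. 43,200,500 ms since midnight UT, as required by RFC 792.
 */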
 878
 879
 880/*
 881 *      Handle ICMP_ADDRESS_MASK requests.  (RFC950)
 882 *
 883 * RFC1122 (3.2.2.9).  A host MUST only send replies to
 884 * ADDRESS_MASK requests if it's been configured as an address mask
 885 * agent.  Receiving a request doesn't constitute implicit permission to
 886 * act as one. Of course, implementing this correctly requires (SHOULD)
 887 * a way to turn the functionality on and off.  Another one for sysctl(),
 888 * I guess. -- MS
 889 *
 890 * RFC1812 (4.3.3.9).   A router MUST implement it.
  891 *                      A router SHOULD have a switch turning it on/off.
 892 *                      This switch MUST be ON by default.
 893 *
  894 * Gratuitous replies and zero-source replies are not implemented;
  895 * this complies with the RFC. DO NOT implement them!!! The whole idea
  896 * of broadcast addrmask replies as specified in RFC950 is broken.
  897 * The problem is that it is not uncommon to have several prefixes
  898 * on one physical interface. Moreover, an addrmask agent may not even
  899 * be aware of the other prefixes that exist.
  900 * If the source is zero, the addrmask agent cannot choose the correct
  901 * prefix. Gratuitous mask announcements suffer from the same problem.
  902 * RFC1812 explains this, but still allows the use of ADDRMASK,
  903 * which is pretty silly. --ANK
  904 *
  905 * All these rules are so bizarre that I removed kernel addrmask
  906 * support entirely. It is wrong, it is obsolete, and nobody uses it
  907 * in any case. --ANK
 908 *
 909 * Furthermore you can do it with a usermode address agent program
 910 * anyway...
 911 */
 912
 913static void icmp_address(struct sk_buff *skb)
 914{
 915#if 0
 916        if (net_ratelimit())
 917                printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
 918#endif
 919}
 920
 921/*
  922 * RFC1812 (4.3.3.9).   A router SHOULD listen to all replies, and complain
 923 *                      loudly if an inconsistency is found.
 924 * called with rcu_read_lock()
 925 */
 926
 927static void icmp_address_reply(struct sk_buff *skb)
 928{
 929        struct rtable *rt = skb_rtable(skb);
 930        struct net_device *dev = skb->dev;
 931        struct in_device *in_dev;
 932        struct in_ifaddr *ifa;
 933
 934        if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
 935                return;
 936
 937        in_dev = __in_dev_get_rcu(dev);
 938        if (!in_dev)
 939                return;
 940
 941        if (in_dev->ifa_list &&
 942            IN_DEV_LOG_MARTIANS(in_dev) &&
 943            IN_DEV_FORWARD(in_dev)) {
 944                __be32 _mask, *mp;
 945
 946                mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
 947                BUG_ON(mp == NULL);
 948                for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
 949                        if (*mp == ifa->ifa_mask &&
 950                            inet_ifa_match(rt->rt_src, ifa))
 951                                break;
 952                }
 953                if (!ifa && net_ratelimit()) {
 954                        printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
 955                               mp, dev->name, &rt->rt_src);
 956                }
 957        }
 958}
 959
 960static void icmp_discard(struct sk_buff *skb)
 961{
 962}
 963
 964/*
 965 *      Deal with incoming ICMP packets.
 966 */
 967int icmp_rcv(struct sk_buff *skb)
 968{
 969        struct icmphdr *icmph;
 970        struct rtable *rt = skb_rtable(skb);
 971        struct net *net = dev_net(rt->dst.dev);
 972
 973        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 974                struct sec_path *sp = skb_sec_path(skb);
 975                int nh;
 976
 977                if (!(sp && sp->xvec[sp->len - 1]->props.flags &
 978                                 XFRM_STATE_ICMP))
 979                        goto drop;
 980
 981                if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
 982                        goto drop;
 983
 984                nh = skb_network_offset(skb);
 985                skb_set_network_header(skb, sizeof(*icmph));
 986
 987                if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
 988                        goto drop;
 989
 990                skb_set_network_header(skb, nh);
 991        }
 992
 993        ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
 994
 995        switch (skb->ip_summed) {
 996        case CHECKSUM_COMPLETE:
 997                if (!csum_fold(skb->csum))
 998                        break;
 999                /* fall through */
1000        case CHECKSUM_NONE:
1001                skb->csum = 0;
1002                if (__skb_checksum_complete(skb))
1003                        goto error;
1004        }
1005
1006        if (!pskb_pull(skb, sizeof(*icmph)))
1007                goto error;
1008
1009        icmph = icmp_hdr(skb);
1010
1011        ICMPMSGIN_INC_STATS_BH(net, icmph->type);
1012        /*
 1013         *      18 is the highest 'known' ICMP type. Anything else is a mystery.
 1014         *
 1015         *      RFC 1122: 3.2.2  Unknown ICMP message types MUST be silently
1016         *                discarded.
1017         */
1018        if (icmph->type > NR_ICMP_TYPES)
1019                goto error;
1020
1021
1022        /*
1023         *      Parse the ICMP message
1024         */
1025
1026        if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
1027                /*
1028                 *      RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
 1029                 *        silently ignored (we let the user decide with a sysctl).
1030                 *      RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
1031                 *        discarded if to broadcast/multicast.
1032                 */
1033                if ((icmph->type == ICMP_ECHO ||
1034                     icmph->type == ICMP_TIMESTAMP) &&
1035                    net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
1036                        goto error;
1037                }
1038                if (icmph->type != ICMP_ECHO &&
1039                    icmph->type != ICMP_TIMESTAMP &&
1040                    icmph->type != ICMP_ADDRESS &&
1041                    icmph->type != ICMP_ADDRESSREPLY) {
1042                        goto error;
1043                }
1044        }
1045
1046        icmp_pointers[icmph->type].handler(skb);
1047
1048drop:
1049        kfree_skb(skb);
1050        return 0;
1051error:
1052        ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
1053        goto drop;
1054}
1055
1056/*
1057 *      This table is the definition of how we handle ICMP.
1058 */
1059static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
1060        [ICMP_ECHOREPLY] = {
1061                .handler = icmp_discard,
1062        },
1063        [1] = {
1064                .handler = icmp_discard,
1065                .error = 1,
1066        },
1067        [2] = {
1068                .handler = icmp_discard,
1069                .error = 1,
1070        },
1071        [ICMP_DEST_UNREACH] = {
1072                .handler = icmp_unreach,
1073                .error = 1,
1074        },
1075        [ICMP_SOURCE_QUENCH] = {
1076                .handler = icmp_unreach,
1077                .error = 1,
1078        },
1079        [ICMP_REDIRECT] = {
1080                .handler = icmp_redirect,
1081                .error = 1,
1082        },
1083        [6] = {
1084                .handler = icmp_discard,
1085                .error = 1,
1086        },
1087        [7] = {
1088                .handler = icmp_discard,
1089                .error = 1,
1090        },
1091        [ICMP_ECHO] = {
1092                .handler = icmp_echo,
1093        },
1094        [9] = {
1095                .handler = icmp_discard,
1096                .error = 1,
1097        },
1098        [10] = {
1099                .handler = icmp_discard,
1100                .error = 1,
1101        },
1102        [ICMP_TIME_EXCEEDED] = {
1103                .handler = icmp_unreach,
1104                .error = 1,
1105        },
1106        [ICMP_PARAMETERPROB] = {
1107                .handler = icmp_unreach,
1108                .error = 1,
1109        },
1110        [ICMP_TIMESTAMP] = {
1111                .handler = icmp_timestamp,
1112        },
1113        [ICMP_TIMESTAMPREPLY] = {
1114                .handler = icmp_discard,
1115        },
1116        [ICMP_INFO_REQUEST] = {
1117                .handler = icmp_discard,
1118        },
1119        [ICMP_INFO_REPLY] = {
1120                .handler = icmp_discard,
1121        },
1122        [ICMP_ADDRESS] = {
1123                .handler = icmp_address,
1124        },
1125        [ICMP_ADDRESSREPLY] = {
1126                .handler = icmp_address_reply,
1127        },
1128};
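
/*
 * Dispatch note (sketch): icmp_rcv() above indexes this table directly
 * by type after bounds-checking against NR_ICMP_TYPES (18, i.e.
 * ICMP_ADDRESSREPLY, the highest type handled here):
 *
 *	if (icmph->type > NR_ICMP_TYPES)
 *		goto error;
 *	icmp_pointers[icmph->type].handler(skb);
 *
 * The .error flag is what icmp_send() consults so that no ICMP error
 * is ever generated in response to another ICMP error.
 */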
1129
1130static void __net_exit icmp_sk_exit(struct net *net)
1131{
1132        int i;
1133
1134        for_each_possible_cpu(i)
1135                inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
1136        kfree(net->ipv4.icmp_sk);
1137        net->ipv4.icmp_sk = NULL;
1138}
1139
1140static int __net_init icmp_sk_init(struct net *net)
1141{
1142        int i, err;
1143
1144        net->ipv4.icmp_sk =
1145                kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
1146        if (net->ipv4.icmp_sk == NULL)
1147                return -ENOMEM;
1148
1149        for_each_possible_cpu(i) {
1150                struct sock *sk;
1151
1152                err = inet_ctl_sock_create(&sk, PF_INET,
1153                                           SOCK_RAW, IPPROTO_ICMP, net);
1154                if (err < 0)
1155                        goto fail;
1156
1157                net->ipv4.icmp_sk[i] = sk;
1158
1159                /* Enough space for 2 64K ICMP packets, including
1160                 * sk_buff struct overhead.
1161                 */
1162                sk->sk_sndbuf =
1163                        (2 * ((64 * 1024) + sizeof(struct sk_buff)));
1164
1165                /*
1166                 * Speedup sock_wfree()
1167                 */
1168                sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1169                inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
1170        }
1171
1172        /* Control parameters for ECHO replies. */
1173        net->ipv4.sysctl_icmp_echo_ignore_all = 0;
1174        net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
1175
1176        /* Control parameter - ignore bogus broadcast responses? */
1177        net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
1178
1179        /*
1180         *      Configurable global rate limit.
1181         *
 1182         *      ratelimit defines the tokens/packet consumed for the
 1183         *      dst->rate_token bucket; ratemask defines which ICMP types
 1184         *      are rate limited by setting the corresponding bit.
1185         *
1186         *      default:
1187         *      dest unreachable (3), source quench (4),
1188         *      time exceeded (11), parameter problem (12)
1189         */
1190
1191        net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
1192        net->ipv4.sysctl_icmp_ratemask = 0x1818;
1193        net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
1194
1195        return 0;
1196
1197fail:
1198        for_each_possible_cpu(i)
1199                inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
1200        kfree(net->ipv4.icmp_sk);
1201        return err;
1202}
1203
1204static struct pernet_operations __net_initdata icmp_sk_ops = {
1205       .init = icmp_sk_init,
1206       .exit = icmp_sk_exit,
1207};
1208
1209int __init icmp_init(void)
1210{
1211        return register_pernet_subsys(&icmp_sk_ops);
1212}
1213