linux/net/ipv6/reassembly.c
/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen      Make it work with multiple hosts.
 *                      More RFC compliance.
 *
 *      Horst von Brand Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))

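/*
 * The per-fragment reassembly offset is stashed in the skb control
 * buffer after the inet6 parameters, so IP6CB() (which interprets the
 * same cb area as struct inet6_skb_parm) stays usable on queued
 * fragments; FRAG6_CB(skb)->offset records where this fragment's
 * payload sits within the original datagram.
 */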

/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;            /* device of the last arrived fragment */
        unsigned int            csum;
        __u16                   nhoffset;       /* offset of the nexthdr byte to patch on reassembly */
};

struct inet_frags_ctl ip6_frags_ctl __read_mostly = {
        .high_thresh     = 256 * 1024,
        .low_thresh      = 192 * 1024,
        .timeout         = IPV6_FRAG_TIMEOUT,
        .secret_interval = 10 * 60 * HZ,
};
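
/*
 * Tuning notes: once the memory charged to incomplete datagrams
 * exceeds high_thresh bytes, the evictor tears down the
 * least-recently-touched queues until usage falls below low_thresh.
 * timeout (IPV6_FRAG_TIMEOUT, 60 seconds) bounds how long a datagram
 * may wait for its missing fragments, and every secret_interval
 * (10 minutes) the hash secret is regenerated so attackers cannot
 * keep aiming traffic at a single hash bucket.
 */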

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(void)
{
        return ip6_frags.nqueues;
}

int ip6_frag_mem(void)
{
        return atomic_read(&ip6_frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside the
 * ip6_frags.lock, as doing so could race with the hash secret
 * (ip6_frags.rnd) being recalculated.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
                               struct in6_addr *daddr)
{
        u32 a, b, c;

        a = (__force u32)saddr->s6_addr32[0];
        b = (__force u32)saddr->s6_addr32[1];
        c = (__force u32)saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += ip6_frags.rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)saddr->s6_addr32[3];
        b += (__force u32)daddr->s6_addr32[0];
        c += (__force u32)daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += (__force u32)daddr->s6_addr32[2];
        b += (__force u32)daddr->s6_addr32[3];
        c += (__force u32)id;
        __jhash_mix(a, b, c);

        return c & (INETFRAGS_HASHSZ - 1);
}
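
/*
 * The mixing above is an open-coded jhash (Jenkins lookup2) over the
 * nine 32-bit words {saddr, daddr, id}, seeded with the random
 * ip6_frags.rnd so that remote senders cannot predict bucket
 * placement.  A rough sketch of the same computation via jhash2(),
 * assuming the key words were first packed into one contiguous array
 * (not equivalent bit-for-bit: jhash2() also folds the key length
 * into its final round):
 *
 *      u32 k[9];
 *
 *      memcpy(k, saddr, sizeof(*saddr));
 *      memcpy(k + 4, daddr, sizeof(*daddr));
 *      k[8] = (__force u32)id;
 *      return jhash2(k, 9, ip6_frags.rnd) & (INETFRAGS_HASHSZ - 1);
 */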

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq;
        struct ip6_create_arg *arg = a;

        fq = container_of(q, struct frag_queue, q);
        return (fq->id == arg->id &&
                        ipv6_addr_equal(&fq->saddr, arg->src) &&
                        ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);
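
/*
 * RFC 2460 keys IPv6 reassembly on the source address, destination
 * address and fragment Identification value, which is exactly the
 * triple compared above.  Unlike IPv4 there is no protocol number in
 * the key: the Identification is unique per (src, dst) pair.
 */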

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &ip6_frags.mem);
        kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
        struct ip6_create_arg *arg = a;

        fq->id = arg->id;
        ipv6_addr_copy(&fq->saddr, arg->src);
        ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
        inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct inet6_dev *idev)
{
        int evicted;

        evicted = inet_frag_evictor(&ip6_frags);
        if (evicted)
                IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net_device *dev = NULL;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & COMPLETE)
                goto out;

        fq_kill(fq);

        dev = dev_get_by_index(&init_net, fq->iif);
        if (!dev)
                goto out;

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.last_in & FIRST_IN) || !fq->q.fragments)
                goto out;

        /*
         * But use the device on which the LAST segment arrived as the
         * source.  And do not use the fq->dev pointer directly; the
         * device might already have disappeared.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
        if (dev)
                dev_put(dev);
        spin_unlock(&fq->q.lock);
        fq_put(fq);
}
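
/*
 * This timer fires roughly IPV6_FRAG_TIMEOUT jiffies after the queue
 * was set up.  Per RFC 2460, the ICMPv6 Time Exceeded message with
 * code "fragment reassembly time exceeded" is sent only if the first
 * fragment (offset zero) was received, since only that fragment
 * carries the upper-layer header the sender needs to identify the
 * affected flow.
 */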

static __inline__ struct frag_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
        struct inet6_dev *idev)
{
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        unsigned int hash;

        arg.id = id;
        arg.src = src;
        arg.dst = dst;
        hash = ip6qhashfn(id, src, dst);

        q = inet_frag_find(&ip6_frags, &arg, hash);
        if (q == NULL)
                goto oom;

        return container_of(q, struct frag_queue, q);

oom:
        IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
        return NULL;
}
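
/*
 * inet_frag_find() is a lookup-or-create primitive: it walks the
 * bucket selected by ip6qhashfn(), comparing entries with
 * ip6_frag_match(), and on a miss allocates a fresh queue, initializes
 * it through the ip6_frag_init() constructor and arms its expiry
 * timer.  The queue is returned with a reference held, which is why
 * every caller ends with fq_put().
 */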

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;

        if (fq->q.last_in & COMPLETE)
                goto err;

        /* The offset field counts 8-octet units in its top 13 bits;
         * masking off the low reserved/M-flag bits of the host-order
         * value yields the byte offset directly.
         */
        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }
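
        /*
         * Note: when the NIC delivered a checksum over the whole
         * packet (CHECKSUM_COMPLETE), the block above subtracts the
         * contribution of the headers in front of the fragment data,
         * since they are pulled off below and skb->csum must keep
         * matching what remains.
         */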

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= LAST_IN;
                fq->q.len = end;
        } else {
                /* Check that the fragment length is a multiple of
                 * 8 bytes, as the RFC requires for non-final fragments.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragment is completely overridden by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->q.fragments = next;

                        fq->q.meat -= free_it->len;
                        frag_kfree_skb(free_it, NULL);
                }
        }
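
        /*
         * Net effect of the two overlap checks above: a fragment
         * already queued in front keeps its bytes (the new fragment's
         * head is trimmed), while the new fragment wins against
         * anything queued behind it (overlapped heads are eaten,
         * fully covered fragments are dropped).  RFC 5722 has since
         * required dropping the whole datagram when overlapping
         * fragments are seen; this code predates that rule.
         */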

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        atomic_add(skb->truesize, &ip6_frags.mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= FIRST_IN;
        }

        /* Reassemble once both terminal fragments have been seen and
         * the queued bytes (meat) cover the whole datagram length.
         */
        if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
                return ip6_frag_reasm(fq, prev, dev);

        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
        write_unlock(&ip6_frags.lock);
        return -1;

err:
        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

/*
 *      Check if this packet is complete.
 *      Returns -1 on failure for any reason, and 1 when the frame has
 *      been successfully reassembled and its nexthdr field fixed up.
 *
 *      It is called with a locked fq, and the caller must check that the
 *      queue is eligible for reassembly, i.e. it is not COMPLETE,
 *      the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->q.fragments;
        int    payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                kfree_skb(fq->q.fragments);
                fq->q.fragments = head;
        }
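
        /*
         * The swap above makes the skb that completed the datagram
         * (the one the caller is still processing) the head of the
         * chain: a clone takes its old slot, and skb_morph() gives it
         * the identity of the previous head, so the finished packet
         * can continue up the stack in the current receive context.
         */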

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG6_CB(head)->offset == 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments. */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip6_frags.mem);
        }

        /* We have to remove the fragment header from the datagram and
         * relocate the remaining headers in order to calculate the ICV
         * correctly. */
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);

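        /*
         * At this point the nexthdr byte that announced the fragment
         * header has been overwritten with the fragment header's own
         * nexthdr value, and everything in front of the payload has
         * slid forward by the 8 bytes of struct frag_hdr, leaving a
         * packet that looks as if it had never been fragmented.  That
         * is what lets AH compute the ICV over the reassembled
         * datagram correctly.
         */
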
        skb_shinfo(head)->frag_list = head->next;
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &ip6_frags.mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip6_frags.mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr = ipv6_hdr(skb);

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len == 0) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }
        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr)))) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

        /* 0xFFF9 covers the offset bits (0xFFF8) and the M flag
         * (0x0001): if all of them are clear this is a lone fragment
         * with offset zero and nothing following it, so no reassembly
         * is needed.
         */
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }

        if (atomic_read(&ip6_frags.mem) > ip6_frags_ctl.high_thresh)
                ip6_evictor(ip6_dst_idev(skb->dst));

        if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
                          ip6_dst_idev(skb->dst))) != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

static struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

void __init ipv6_frag_init(void)
{
        if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
                printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");

        ip6_frags.ctl = &ip6_frags_ctl;
        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.constructor = ip6_frag_init;
        ip6_frags.destructor = NULL;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
        inet_frags_init(&ip6_frags);
}
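
/*
 * Registration sketch: the fragment header (IPPROTO_FRAGMENT, 44) is
 * dispatched like any other extension header, so ipv6_frag_rcv() runs
 * from the generic nexthdr walk on input; inet_frags_init() then sets
 * up the shared hash table, the LRU list used by the evictor and the
 * secret-rotation timer driven by ip6_frags_ctl.secret_interval.
 */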