linux/net/ipv6/esp6.c
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *      Mitsuru KANDA @USAGI       : IPv6 Support
 *      Kazunori MIYAZAWA @USAGI   :
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *      This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

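/*
 * Per-packet scratch state.  The AEAD requests below may complete
 * asynchronously, so the temporary buffer allocated for a packet is
 * stashed in skb->cb and freed again from the completion callbacks.
 */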
struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for the SG list and IV.
 *
 * For alignment reasons the upper 32 bits of the sequence number (if present)
 * are placed at the front, followed by the IV, the request itself and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
        unsigned int len;

        len = seqihlen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

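/*
 * The esp_tmp_*() helpers below carve the individual regions (ESN high
 * bits, IV, AEAD request and scatterlist array) back out of the single
 * buffer returned by esp_alloc_tmp(), applying the same alignment rules
 * that were used when its length was computed.
 */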
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
        struct crypto_aead *aead, u8 *iv)
{
        struct aead_givcrypt_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_givcrypt_set_tfm(req, aead);
        return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
        struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

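/*
 * Completion callback for asynchronous encryption: release the per-packet
 * scratch buffer and resume the xfrm output path with the crypto result.
 */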
static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        kfree(ESP_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

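/*
 * Transmit path.  On entry the skb carries only the payload to be
 * protected; this function appends the RFC 4303 trailer (optional TFC
 * padding, self-describing padding, pad-length and next-header bytes),
 * prepends the ESP header and hands the result to the AEAD "givencrypt"
 * operation, which also generates the per-packet IV:
 *
 *      SPI | seq_no | IV | payload | TFC pad | pad | padlen | NH | ICV
 *
 * The associated data covers the SPI and sequence number (plus the high
 * 32 ESN bits when XFRM_STATE_ESN is set).
 */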
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_givcrypt_request *req;
        struct scatterlist *sg;
        struct scatterlist *asg;
        struct sk_buff *trailer;
        void *tmp;
        int blksize;
        int clen;
        int alen;
        int plen;
        int tfclen;
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        u8 *iv;
        u8 *tail;
        __be32 *seqhi;

        /* skb is pure payload to encrypt */
        aead = x->data;
        alen = crypto_aead_authsize(aead);

        tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        plen = clen - skb->len - tfclen;

        err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
        if (err < 0)
                goto error;
        nfrags = err;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp) {
                err = -ENOMEM;
                goto error;
        }

        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
        sg = asg + sglists;

        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = *skb_mac_header(skb);
        pskb_put(skb, trailer, clen - skb->len + alen);

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
                              XFRM_SKB_CB(skb)->seq.output.low +
                              ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
        if (err == -EINPROGRESS)
                goto error;

        if (err == -EBUSY)
                err = NET_XMIT_DROP;

        kfree(tmp);

error:
        return err;
}

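/*
 * Second half of the receive path, run either inline (synchronous crypto)
 * or from the completion callback.  It validates the decrypted trailer,
 * trims the padding and ICV, pulls the ESP header and IV, and returns the
 * inner next-header value (or a negative error).
 */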
static int esp_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct crypto_aead *aead = x->data;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int hdr_len = skb_network_header_len(skb);
        int padlen;
        u8 nexthdr[2];

        kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        /* ... check padding bits here. Silly. :-) */

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

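/*
 * Receive path.  Sanity-check the packet, make the trailer writable with
 * skb_cow_data(), allocate the scratch buffer, build the scatterlists for
 * the ciphertext and the associated data (SPI, sequence number and, with
 * ESN, the high sequence bits) and run the AEAD decryption.  Synchronous
 * completion falls through to esp_input_done2(); asynchronous completion
 * is handled by esp_input_done().
 */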
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;
        struct scatterlist *asg;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0) {
                ret = -EINVAL;
                goto out;
        }

        ret = -ENOMEM;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
        sg = asg + sglists;

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        /* Get ivec. This can be wrong; check against other implementations. */
        iv = esph->enc_data;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
        aead_request_set_assoc(req, asg, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        ret = esp_input_done2(skb, ret);

out:
        return ret;
}

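/*
 * Return the largest payload that still fits in 'mtu' once the ESP
 * header and IV (x->props.header_len), the ICV and the block-size
 * padding are accounted for.  In non-tunnel modes the outer IPv6 header
 * is not part of the padded region, so it is excluded from the rounding
 * and added back afterwards; the final "- 2" reserves the pad-length and
 * next-header bytes of the trailer.
 */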
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
        struct crypto_aead *aead = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        unsigned int net_adj;

        if (x->props.mode != XFRM_MODE_TUNNEL)
                net_adj = sizeof(struct ipv6hdr);
        else
                net_adj = 0;

        return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

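/*
 * ICMPv6 error handler.  Only Packet Too Big and redirect messages are
 * of interest; for those, look up the state by SPI and destination
 * address and update the cached route or PMTU accordingly.
 */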
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return 0;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
                ip6_update_pmtu(skb, net, info, 0, 0);
        xfrm_state_put(x);

        return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}

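/*
 * State setup for genuine AEAD algorithms (e.g. rfc4106(gcm(aes)))
 * configured through x->aead: allocate the transform and program its key
 * and ICV length directly.
 */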
static int esp_init_aead(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        int err;

        aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}

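/*
 * State setup for separate encryption and authentication algorithms:
 * wrap them in an "authenc(auth,enc)" (or "authencesn(...)" when ESN is
 * enabled) AEAD transform, then pack the rtattr-encoded enckeylen
 * parameter, the authentication key and the encryption key into a single
 * blob for crypto_aead_setkey().
 */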
static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authencesn(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authenc(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

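/*
 * Initialize an ESP state: pick the AEAD or authenc setup path above,
 * then derive props.header_len (ESP header + IV, plus any encapsulating
 * header the mode adds) and props.trailer_len (worst-case padding plus
 * the pad-length/next-header bytes and the ICV).
 */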
static int esp6_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        if (x->encap)
                return -EINVAL;

        x->data = NULL;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        default:
                goto error;
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

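/*
 * Registration glue: esp6_type plugs the functions above into the xfrm
 * framework for AF_INET6, and esp6_protocol hooks protocol number
 * IPPROTO_ESP (50) into the IPv6 input and ICMPv6 error paths.
 */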
static const struct xfrm_type esp6_type = {
        .description    = "ESP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
        .get_mtu        = esp6_get_mtu,
        .input          = esp6_input,
        .output         = esp6_output,
        .hdr_offset     = xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
        .handler        =       xfrm6_rcv,
        .cb_handler     =       esp6_rcv_cb,
        .err_handler    =       esp6_err,
        .priority       =       0,
};

static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp6_fini(void)
{
        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);