/* linux/net/ipv6/esp6.c */
   1/*
   2 * Copyright (C)2002 USAGI/WIDE Project
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  17 *
  18 * Authors
  19 *
  20 *      Mitsuru KANDA @USAGI       : IPv6 Support
  21 *      Kazunori MIYAZAWA @USAGI   :
  22 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  23 *
  24 *      This file is derived from net/ipv4/esp.c
  25 */
  26
  27#define pr_fmt(fmt) "IPv6: " fmt
  28
  29#include <crypto/aead.h>
  30#include <crypto/authenc.h>
  31#include <linux/err.h>
  32#include <linux/module.h>
  33#include <net/ip.h>
  34#include <net/xfrm.h>
  35#include <net/esp.h>
  36#include <linux/scatterlist.h>
  37#include <linux/kernel.h>
  38#include <linux/pfkeyv2.h>
  39#include <linux/random.h>
  40#include <linux/slab.h>
  41#include <linux/spinlock.h>
  42#include <net/ip6_route.h>
  43#include <net/icmp.h>
  44#include <net/ipv6.h>
  45#include <net/protocol.h>
  46#include <linux/icmpv6.h>
  47
/* Per-skb ESP state overlaid on skb->cb: the generic xfrm control block
 * plus a pointer to the temporary crypto scratch buffer, so asynchronous
 * completion callbacks can free it. */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

/* Access the ESP control block stored in skb->cb. */
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/* Forward declaration: esp6_output() uses the MTU helper defined below. */
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
  56
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	/* Optional ESN high-sequence-number words come first. */
	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		/* Pad so the IV can satisfy the cipher's alignment mask
		 * while the following request stays at ctx alignment. */
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	/* Room for the givcrypt request plus its private context. */
	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	/* Finally the scatterlist entries (payload + assoc fragments). */
	len += sizeof(struct scatterlist) * nfrags;

	/* GFP_ATOMIC: called from the packet processing paths. */
	return kmalloc(len, GFP_ATOMIC);
}
  87
  88static inline __be32 *esp_tmp_seqhi(void *tmp)
  89{
  90        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
  91}
  92
  93static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
  94{
  95        return crypto_aead_ivsize(aead) ?
  96               PTR_ALIGN((u8 *)tmp + seqhilen,
  97                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
  98}
  99
 100static inline struct aead_givcrypt_request *esp_tmp_givreq(
 101        struct crypto_aead *aead, u8 *iv)
 102{
 103        struct aead_givcrypt_request *req;
 104
 105        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
 106                                crypto_tfm_ctx_alignment());
 107        aead_givcrypt_set_tfm(req, aead);
 108        return req;
 109}
 110
 111static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
 112{
 113        struct aead_request *req;
 114
 115        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
 116                                crypto_tfm_ctx_alignment());
 117        aead_request_set_tfm(req, aead);
 118        return req;
 119}
 120
 121static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
 122                                             struct aead_request *req)
 123{
 124        return (void *)ALIGN((unsigned long)(req + 1) +
 125                             crypto_aead_reqsize(aead),
 126                             __alignof__(struct scatterlist));
 127}
 128
 129static inline struct scatterlist *esp_givreq_sg(
 130        struct crypto_aead *aead, struct aead_givcrypt_request *req)
 131{
 132        return (void *)ALIGN((unsigned long)(req + 1) +
 133                             crypto_aead_reqsize(aead),
 134                             __alignof__(struct scatterlist));
 135}
 136
 137static void esp_output_done(struct crypto_async_request *base, int err)
 138{
 139        struct sk_buff *skb = base->data;
 140
 141        kfree(ESP_SKB_CB(skb)->tmp);
 142        xfrm_output_resume(skb, err);
 143}
 144
/* ESP output: append padding and trailer, prepend the ESP header, then
 * encrypt the payload in place through the AEAD givcrypt interface
 * (which also generates the IV). Returns -EINPROGRESS when the crypto
 * layer completes asynchronously; esp_output_done() then resumes
 * transmission and frees the scratch buffer. */
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct sk_buff *trailer;
	void *tmp;
	int blksize;
	int clen;	/* ciphertext length: payload + padding + pad/nh bytes */
	int alen;	/* ICV (authentication tag) length */
	int plen;	/* ESP padding incl. pad-length and next-header bytes */
	int tfclen;	/* traffic-flow-confidentiality padding */
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	u8 *iv;
	u8 *tail;
	__be32 *seqhi;
	struct esp_data *esp = x->data;

	/* skb is pure payload to encrypt */
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		/* Pad short packets up to the TFC target, capped at the
		 * usable MTU so padding never forces fragmentation. */
		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	/* RFC 4303: the encrypted part must be 4-byte aligned and a
	 * multiple of the cipher block size; +2 covers the pad-length
	 * and next-header trailer bytes. */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* ESN: authenticate SPI, seq-hi and seq-lo as three
		 * separate scatterlist entries. */
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	/* Carve the scratch buffer: seqhi, IV, request, then SG arrays. */
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	/* Self-describing default padding 1, 2, 3, ... (RFC 4303 2.4). */
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	/* xfrm stashed the inner next-header value at the mac header. */
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	/* Map the to-be-encrypted region (payload + trailer + ICV room). */
	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	/* The 64-bit sequence number seeds the generated IV. */
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low +
			      ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;	/* async: esp_output_done() frees tmp */

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}
 271
/* Second half of ESP input, shared by the synchronous path and the async
 * completion callback: free the scratch buffer, validate the decrypted
 * trailer (pad length, next header), then strip the ESP header + IV and
 * the trailer + ICV. Returns the inner next-header protocol number on
 * success, or a negative errno. */
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	/* The two bytes just before the ICV: pad length and next header. */
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	/* Padding plus trailer must fit strictly inside the payload. */
	if (padlen + 2 + alen >= elen) {
		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	/* Drop trailer (padding + 2 + ICV) and the ESP header + IV. */
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
 318
 319static void esp_input_done(struct crypto_async_request *base, int err)
 320{
 321        struct sk_buff *skb = base->data;
 322
 323        xfrm_input_resume(skb, esp_input_done2(skb, err));
 324}
 325
/* ESP input: sanity-check lengths, build scatterlists over the packet
 * and kick off AEAD decryption. Returns -EINPROGRESS when the crypto
 * layer completes asynchronously (esp_input_done() resumes xfrm input
 * later); otherwise the result is finalized via esp_input_done2(). */
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;

	/* Need at least the ESP header and IV in the linear area. */
	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}

	/* No room for any ciphertext at all: malformed packet. */
	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = -ENOMEM;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* ESN: SPI, seq-hi and seq-lo authenticated separately. */
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	/* esp_input_done2() frees tmp on both sync and async paths. */
	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong, check against another impls. */
	iv = esph->enc_data;

	/* Map the ciphertext region following the header + IV. */
	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}
 415
 416static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 417{
 418        struct esp_data *esp = x->data;
 419        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
 420        u32 align = max_t(u32, blksize, esp->padlen);
 421        unsigned int net_adj;
 422
 423        if (x->props.mode != XFRM_MODE_TUNNEL)
 424                net_adj = sizeof(struct ipv6hdr);
 425        else
 426                net_adj = 0;
 427
 428        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
 429                 net_adj) & ~(align - 1)) + net_adj - 2;
 430}
 431
 432static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 433                    u8 type, u8 code, int offset, __be32 info)
 434{
 435        struct net *net = dev_net(skb->dev);
 436        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
 437        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
 438        struct xfrm_state *x;
 439
 440        if (type != ICMPV6_PKT_TOOBIG &&
 441            type != NDISC_REDIRECT)
 442                return 0;
 443
 444        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
 445                              esph->spi, IPPROTO_ESP, AF_INET6);
 446        if (!x)
 447                return 0;
 448
 449        if (type == NDISC_REDIRECT)
 450                ip6_redirect(skb, net, skb->dev->ifindex, 0);
 451        else
 452                ip6_update_pmtu(skb, net, info, 0, 0);
 453        xfrm_state_put(x);
 454
 455        return 0;
 456}
 457
 458static void esp6_destroy(struct xfrm_state *x)
 459{
 460        struct esp_data *esp = x->data;
 461
 462        if (!esp)
 463                return;
 464
 465        crypto_free_aead(esp->aead);
 466        kfree(esp);
 467}
 468
 469static int esp_init_aead(struct xfrm_state *x)
 470{
 471        struct esp_data *esp = x->data;
 472        struct crypto_aead *aead;
 473        int err;
 474
 475        aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
 476        err = PTR_ERR(aead);
 477        if (IS_ERR(aead))
 478                goto error;
 479
 480        esp->aead = aead;
 481
 482        err = crypto_aead_setkey(aead, x->aead->alg_key,
 483                                 (x->aead->alg_key_len + 7) / 8);
 484        if (err)
 485                goto error;
 486
 487        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
 488        if (err)
 489                goto error;
 490
 491error:
 492        return err;
 493}
 494
/* Build an authenc(hmac,cipher) AEAD transform from separate auth and
 * encryption algorithms, packing both keys plus the enckeylen parameter
 * into the RTA-formatted key blob the authenc template expects. */
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	/* ESN states need the authencesn wrapper so the high sequence
	 * bits are covered by the ICV. */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	/* Combined key layout: RTA header + param, auth key, enc key.
	 * alg_key_len values are in bits; round up to bytes. */
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* The transform's full digest size must match the xfrm
		 * algorithm description before truncation is applied. */
		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	/* NOTE(review): key material is freed without zeroization here —
	 * consider clearing the buffer before kfree; verify policy. */
	kfree(key);

error:
	return err;
}
 584
 585static int esp6_init_state(struct xfrm_state *x)
 586{
 587        struct esp_data *esp;
 588        struct crypto_aead *aead;
 589        u32 align;
 590        int err;
 591
 592        if (x->encap)
 593                return -EINVAL;
 594
 595        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 596        if (esp == NULL)
 597                return -ENOMEM;
 598
 599        x->data = esp;
 600
 601        if (x->aead)
 602                err = esp_init_aead(x);
 603        else
 604                err = esp_init_authenc(x);
 605
 606        if (err)
 607                goto error;
 608
 609        aead = esp->aead;
 610
 611        esp->padlen = 0;
 612
 613        x->props.header_len = sizeof(struct ip_esp_hdr) +
 614                              crypto_aead_ivsize(aead);
 615        switch (x->props.mode) {
 616        case XFRM_MODE_BEET:
 617                if (x->sel.family != AF_INET6)
 618                        x->props.header_len += IPV4_BEET_PHMAXLEN +
 619                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
 620                break;
 621        case XFRM_MODE_TRANSPORT:
 622                break;
 623        case XFRM_MODE_TUNNEL:
 624                x->props.header_len += sizeof(struct ipv6hdr);
 625                break;
 626        default:
 627                goto error;
 628        }
 629
 630        align = ALIGN(crypto_aead_blocksize(aead), 4);
 631        if (esp->padlen)
 632                align = max_t(u32, align, esp->padlen);
 633        x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 634
 635error:
 636        return err;
 637}
 638
/* Post-receive callback for the xfrm6 protocol handler: ESP has no
 * extra per-packet work here, so always accept. */
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
 643
/* xfrm type descriptor for ESP over IPv6; replay protection is handled
 * generically by the xfrm layer (XFRM_TYPE_REPLAY_PROT). */
static const struct xfrm_type esp6_type =
{
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};
 657
/* IPv6 protocol handler that feeds received ESP packets into the xfrm
 * input path and routes ICMPv6 errors to esp6_err(). */
static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};
 664
 665static int __init esp6_init(void)
 666{
 667        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
 668                pr_info("%s: can't add xfrm type\n", __func__);
 669                return -EAGAIN;
 670        }
 671        if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
 672                pr_info("%s: can't add protocol\n", __func__);
 673                xfrm_unregister_type(&esp6_type, AF_INET6);
 674                return -EAGAIN;
 675        }
 676
 677        return 0;
 678}
 679
 680static void __exit esp6_fini(void)
 681{
 682        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
 683                pr_info("%s: can't remove protocol\n", __func__);
 684        if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
 685                pr_info("%s: can't remove xfrm type\n", __func__);
 686}
 687
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
/* Autoload this module when an AF_INET6 xfrm state with proto ESP is
 * instantiated. */
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
 693