linux/net/ipv4/esp4.c
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

#include <linux/highmem.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

struct esp_output_extra {
        __be32 seqhi;
        u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
        unsigned int len;

        len = extralen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

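/* Helpers that locate the ESN extra area, IV, AEAD request and SG list
 * inside the buffer returned by esp_alloc_tmp(), honouring the alignment
 * rules described above.
 */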
static inline void *esp_tmp_extra(void *tmp)
{
        return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + extralen,
                         crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

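/* Drop the page references held by the source scatterlist when
 * encryption was done out of place (source and destination differ).
 */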
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
        struct esp_output_extra *extra = esp_tmp_extra(tmp);
        struct crypto_aead *aead = x->data;
        int extralen = 0;
        u8 *iv;
        struct aead_request *req;
        struct scatterlist *sg;

        if (x->props.flags & XFRM_STATE_ESN)
                extralen += sizeof(*extra);

        extra = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);

        /* Unref skb_frag_pages in the src scatterlist if necessary.
         * Skip the first sg which comes from skb->data.
         */
        if (req->src != req->dst)
                for (sg = sg_next(req->src); sg; sg = sg_next(sg))
                        put_page(sg_page(sg));
}

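/* Completion callback for asynchronous encryption: free the temporary
 * buffer and either resume the normal xfrm output path or hand the skb
 * back to the offloading device.
 */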
static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
        struct xfrm_state *x;

        if (xo && (xo->flags & XFRM_DEV_RESUME))
                x = skb->sp->xvec[skb->sp->len - 1];
        else
                x = skb_dst(skb)->xfrm;

        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                if (err) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));
                secpath_reset(skb);
                xfrm_dev_resume(skb);
        } else {
                xfrm_output_resume(skb, err);
        }
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
        __be32 *seqhi = esp_tmp_extra(tmp);

        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
        void *tmp = ESP_SKB_CB(skb)->tmp;
        struct esp_output_extra *extra = esp_tmp_extra(tmp);

        esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
                                sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
                                               struct xfrm_state *x,
                                               struct ip_esp_hdr *esph,
                                               struct esp_output_extra *extra)
{
        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                __u32 seqhi;
                struct xfrm_offload *xo = xfrm_offload(skb);

                if (xo)
                        seqhi = xo->seq.hi;
                else
                        seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

                extra->esphoff = (unsigned char *)esph -
                                 skb_transport_header(skb);
                esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
                extra->seqhi = esph->spi;
                esph->seq_no = htonl(seqhi);
        }

        esph->spi = x->id.spi;

        return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_output_restore_header(skb);
        esp_output_done(base, err);
}

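/* Fill the ESP trailer as defined by RFC 4303: optional TFC padding,
 * self-describing pad bytes 1, 2, 3, ..., the pad length and finally
 * the next header value.
 */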
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
        /* Fill padding... */
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = proto;
}

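/* NAT traversal (RFC 3948): build a UDP header in front of the ESP
 * header.  UDP_ENCAP_ESPINUDP_NON_IKE additionally inserts two zero
 * 32-bit words (the non-IKE marker) between the UDP and ESP headers.
 */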
static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        int encap_type;
        struct udphdr *uh;
        __be32 *udpdata32;
        __be16 sport, dport;
        struct xfrm_encap_tmpl *encap = x->encap;
        struct ip_esp_hdr *esph = esp->esph;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        encap_type = encap->encap_type;
        spin_unlock_bh(&x->lock);

        uh = (struct udphdr *)esph;
        uh->source = sport;
        uh->dest = dport;
        uh->len = htons(skb->len + esp->tailen
                  - skb_transport_offset(skb));
        uh->check = 0;

        switch (encap_type) {
        default:
        case UDP_ENCAP_ESPINUDP:
                esph = (struct ip_esp_hdr *)(uh + 1);
                break;
        case UDP_ENCAP_ESPINUDP_NON_IKE:
                udpdata32 = (__be32 *)(uh + 1);
                udpdata32[0] = udpdata32[1] = 0;
                esph = (struct ip_esp_hdr *)(udpdata32 + 2);
                break;
        }

        *skb_mac_header(skb) = IPPROTO_UDP;
        esp->esph = esph;
}

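/* Append the ESP trailer to the packet.  Use existing tailroom when
 * possible, otherwise place the trailer in a page fragment appended to
 * the skb (encryption is then done out of place), and fall back to
 * skb_cow_data().  Returns the number of fragments needed for the
 * source scatterlist, or a negative error.
 */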
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *tail;
        u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;

        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap)
                esp_output_udp_encap(x, skb, esp);

        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);

                        goto skip_cow;
                } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
                           && !skb_has_frag_list(skb)) {
                        int allocsize;
                        struct sock *sk = skb->sk;
                        struct page_frag *pfrag = &x->xfrag;

                        esp->inplace = false;

                        allocsize = ALIGN(tailen, L1_CACHE_BYTES);

                        spin_lock_bh(&x->lock);

                        if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                                spin_unlock_bh(&x->lock);
                                goto cow;
                        }

                        page = pfrag->page;
                        get_page(page);

                        vaddr = kmap_atomic(page);

                        tail = vaddr + pfrag->offset;

                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

                        kunmap_atomic(vaddr);

                        nfrags = skb_shinfo(skb)->nr_frags;

                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
                                             tailen);
                        skb_shinfo(skb)->nr_frags = ++nfrags;

                        pfrag->offset = pfrag->offset + allocsize;

                        spin_unlock_bh(&x->lock);

                        nfrags++;

                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk)
                                refcount_add(tailen, &sk->sk_wmem_alloc);

                        goto out;
                }
        }

cow:
        esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);
        esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
        pskb_put(skb, trailer, tailen);

out:
        return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);

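/* Build the source (and, for out-of-place operation, destination)
 * scatterlists, program the AEAD request with the associated data, IV
 * and completion callback, and start the (possibly asynchronous)
 * encryption.
 */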
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *iv;
        int alen;
        void *tmp;
        int ivlen;
        int assoclen;
        int extralen;
        struct page *page;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_request *req;
        struct scatterlist *sg, *dsg;
        struct esp_output_extra *extra;
        int err = -ENOMEM;

        assoclen = sizeof(struct ip_esp_hdr);
        extralen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                extralen += sizeof(*extra);
                assoclen += sizeof(__be32);
        }

        aead = x->data;
        alen = crypto_aead_authsize(aead);
        ivlen = crypto_aead_ivsize(aead);

        tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
        if (!tmp)
                goto error;

        extra = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        if (esp->inplace)
                dsg = sg;
        else
                dsg = &sg[esp->nfrags];

        esph = esp_output_set_extra(skb, x, esp->esph, extra);
        esp->esph = esph;

        sg_init_table(sg, esp->nfrags);
        err = skb_to_sgvec(skb, sg,
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
                goto error_free;

        if (!esp->inplace) {
                int allocsize;
                struct page_frag *pfrag = &x->xfrag;

                allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
                        goto error_free;
                }

                skb_shinfo(skb)->nr_frags = 1;

                page = pfrag->page;
                get_page(page);
                /* replace page frags in skb with new page */
                __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
                pfrag->offset = pfrag->offset + allocsize;
                spin_unlock_bh(&x->lock);

                sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
                err = skb_to_sgvec(skb, dsg,
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
                        goto error_free;
        }

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_output_done, skb);

        aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
        aead_request_set_ad(req, assoclen);

        memset(iv, 0, ivlen);
        memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
               min(ivlen, 8));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_encrypt(req);

        switch (err) {
        case -EINPROGRESS:
                goto error;

        case -ENOSPC:
                err = NET_XMIT_DROP;
                break;

        case 0:
                if ((x->props.flags & XFRM_STATE_ESN))
                        esp_output_restore_header(skb);
        }

        if (sg != dsg)
                esp_ssg_unref(x, tmp);

error_free:
        kfree(tmp);
error:
        return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

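/* xfrm_type output handler: compute the padding, trailer and ESP header
 * fields for this packet, then hand off to esp_output_head() and
 * esp_output_tail().
 */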
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int alen;
        int blksize;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;

        esp.inplace = true;

        esp.proto = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        esp.nfrags = esp_output_head(x, skb, &esp);
        if (esp.nfrags < 0)
                return esp.nfrags;

        esph = esp.esph;
        esph->spi = x->id.spi;

        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
        esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
                                 ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        skb_push(skb, -skb_network_offset(skb));

        return esp_output_tail(x, skb, &esp);
}

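/* Validate and strip the decrypted ESP trailer (padding, pad length,
 * next header) and the ICV.  Returns the next header protocol on
 * success or a negative error.
 */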
static inline int esp_remove_trailer(struct sk_buff *skb)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int alen, hlen, elen;
        int padlen, trimlen;
        __wsum csumdiff;
        u8 nexthdr[2];
        int ret;

        alen = crypto_aead_authsize(aead);
        hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        elen = skb->len - hlen;

        if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
                ret = xo->proto;
                goto out;
        }

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        ret = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        trimlen = alen + padlen + 2;
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
                skb->csum = csum_block_sub(skb->csum, csumdiff,
                                           skb->len - trimlen);
        }
        pskb_trim(skb, skb->len - trimlen);

        ret = nexthdr[1];

out:
        return ret;
}

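/* Post-decryption processing: strip the trailer, handle NAT-T address
 * or port changes, adjust checksumming and reposition the transport
 * header.  Returns the inner protocol or a negative error.
 */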
int esp_input_done2(struct sk_buff *skb, int err)
{
        const struct iphdr *iph;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int ihl;

        if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
                kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        err = esp_remove_trailer(skb);
        if (unlikely(err < 0))
                goto out;

        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (iph->saddr != x->props.saddr.a4 ||
                    uh->source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        ipaddr.a4 = iph->saddr;
                        km_new_mapping(x, &ipaddr, uh->source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb_pull_rcsum(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

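/* Completion callback for asynchronous decryption. */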
static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ip_esp_hdr *esph;

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                esph = skb_push(skb, 4);
                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
        }
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_input_restore_header(skb);
        esp_input_done(base, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(*esph) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        __be32 *seqhi;
        void *tmp;
        u8 *iv;
        struct scatterlist *sg;
        int err = -EINVAL;

        if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
                goto out;

        if (elen <= 0)
                goto out;

        assoclen = sizeof(*esph);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        if (!skb_cloned(skb)) {
                if (!skb_is_nonlinear(skb)) {
                        nfrags = 1;

                        goto skip_cow;
                } else if (!skb_has_frag_list(skb)) {
                        nfrags = skb_shinfo(skb)->nr_frags;
                        nfrags++;

                        goto skip_cow;
                }
        }

        err = skb_cow_data(skb, 0, &trailer);
        if (err < 0)
                goto out;

        nfrags = err;

skip_cow:
        err = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        esp_input_set_header(skb, seqhi);

        sg_init_table(sg, nfrags);
        err = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(err < 0)) {
                kfree(tmp);
                goto out;
        }

        skb->ip_summed = CHECKSUM_NONE;

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_input_done, skb);

        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        err = crypto_aead_decrypt(req);
        if (err == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        err = esp_input_done2(skb, err);

out:
        return err;
}

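/* Return the largest payload length that still fits within @mtu after
 * the per-packet ESP overhead, aligned down to the cipher block size.
 */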
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
        struct crypto_aead *aead = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        unsigned int net_adj;

        switch (x->props.mode) {
        case XFRM_MODE_TRANSPORT:
        case XFRM_MODE_BEET:
                net_adj = sizeof(struct iphdr);
                break;
        case XFRM_MODE_TUNNEL:
                net_adj = 0;
                break;
        default:
                BUG();
        }

        return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

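/* ICMP error handler: update the path MTU on fragmentation-needed errors
 * and process redirects for the matching ESP state.
 */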
static int esp4_err(struct sk_buff *skb, u32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return 0;
        case ICMP_REDIRECT:
                break;
        default:
                return 0;
        }

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET);
        if (!x)
                return 0;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
        else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
        xfrm_state_put(x);

        return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}

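/* Set up the transform for states configured with a combined-mode (AEAD)
 * algorithm and program its key and ICV length.
 */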
static int esp_init_aead(struct xfrm_state *x)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;

        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;

        aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}

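/* Set up an authenc (or authencesn) AEAD from separate authentication and
 * encryption algorithms, packing both keys into the crypto_authenc key
 * blob: an rtattr carrying the encryption key length, followed by the
 * authentication key and then the encryption key.
 */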
static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

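/* Initialise the state: allocate the AEAD transform via esp_init_aead()
 * or esp_init_authenc() and compute the per-packet header and trailer
 * space needed for this mode and encapsulation type.
 */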
static int esp_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        x->data = NULL;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
                x->props.header_len += IPV4_BEET_PHMAXLEN;
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        err = -EINVAL;
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
                }
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

static const struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
        .handler        =       xfrm4_rcv,
        .input_handler  =       xfrm_input,
        .cb_handler     =       esp4_rcv_cb,
        .err_handler    =       esp4_err,
        .priority       =       0,
};

static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}

static void __exit esp4_fini(void)
{
        if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);