// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *      Mitsuru KANDA @USAGI       : IPv6 Support
 *      Kazunori MIYAZAWA @USAGI   :
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *      This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

struct esp_output_extra {
        __be32 seqhi;
        u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request,
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
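/*
 * Resulting layout, as a sketch (exact offsets depend on the algorithm's
 * alignment requirements):
 *
 *   [ seqhi (ESN only) ][ IV ][ aead_request + req ctx ][ SG entries ]
 */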
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
        unsigned int len;

        len = seqihlen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
        return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
        struct esp_output_extra *extra = esp_tmp_extra(tmp);
        struct crypto_aead *aead = x->data;
        int extralen = 0;
        u8 *iv;
        struct aead_request *req;
        struct scatterlist *sg;

        if (x->props.flags & XFRM_STATE_ESN)
                extralen += sizeof(*extra);

        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);

        /* Unref skb_frag_pages in the src scatterlist if necessary.
         * Skip the first sg which comes from skb->data.
         */
        if (req->src != req->dst)
                for (sg = sg_next(req->src); sg; sg = sg_next(sg))
                        put_page(sg_page(sg));
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
        struct sock *sk;
        struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
        struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

        sock_put(esk->sk);
        kfree(esk);
}

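/* Look up the TCP socket carrying this SA's ESP-in-TCP stream and cache
 * it in x->encap_sk.  Callers hold rcu_read_lock(); an ERR_PTR is
 * returned if the socket is missing, is not an espintcp socket, or the
 * encap ports changed underneath us.
 */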
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
        struct xfrm_encap_tmpl *encap = x->encap;
        struct esp_tcp_sk *esk;
        __be16 sport, dport;
        struct sock *nsk;
        struct sock *sk;

        sk = rcu_dereference(x->encap_sk);
        if (sk && sk->sk_state == TCP_ESTABLISHED)
                return sk;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        nsk = rcu_dereference_protected(x->encap_sk,
                                        lockdep_is_held(&x->lock));
        if (sk && sk == nsk) {
                esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
                if (!esk) {
                        spin_unlock_bh(&x->lock);
                        return ERR_PTR(-ENOMEM);
                }
                RCU_INIT_POINTER(x->encap_sk, NULL);
                esk->sk = sk;
                call_rcu(&esk->rcu, esp_free_tcp_sk);
        }
        spin_unlock_bh(&x->lock);

        sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
                                        dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
        if (!sk)
                return ERR_PTR(-ENOENT);

        if (!tcp_is_ulp_esp(sk)) {
                sock_put(sk);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&x->lock);
        nsk = rcu_dereference_protected(x->encap_sk,
                                        lockdep_is_held(&x->lock));
        if (encap->encap_sport != sport ||
            encap->encap_dport != dport) {
                sock_put(sk);
                sk = nsk ?: ERR_PTR(-EREMCHG);
        } else if (sk == nsk) {
                sock_put(sk);
        } else {
                rcu_assign_pointer(x->encap_sk, sk);
        }
        spin_unlock_bh(&x->lock);

        return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
        struct sock *sk;
        int err;

        rcu_read_lock();

        sk = esp6_find_tcp_sk(x);
        err = PTR_ERR_OR_ZERO(sk);
        if (err)
                goto out;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                err = espintcp_queue_out(sk, skb);
        else
                err = espintcp_push_skb(sk, skb);
        bh_unlock_sock(sk);

out:
        rcu_read_unlock();
        return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
                                   struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;

        return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;

        local_bh_disable();
        err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
        local_bh_enable();

        /* EINPROGRESS just happens to do the right thing.  It
         * actually means that the skb has been consumed and
         * isn't coming back.
         */
        return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
        kfree_skb(skb);

        return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
        /* UDP encap with IPv6 requires a valid checksum */
        if (*skb_mac_header(skb) == IPPROTO_UDP) {
                struct udphdr *uh = udp_hdr(skb);
                struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int len = ntohs(uh->len);
                unsigned int offset = skb_transport_offset(skb);
                __wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

                uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                            len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
        struct xfrm_state *x;

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                struct sec_path *sp = skb_sec_path(skb);

                x = sp->xvec[sp->len - 1];
        } else {
                x = skb_dst(skb)->xfrm;
        }

        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);

        esp_output_encap_csum(skb);

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                if (err) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));
                secpath_reset(skb);
                xfrm_dev_resume(skb);
        } else {
                if (!err &&
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
                        xfrm_output_resume(skb, err);
        }
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
        __be32 *seqhi = esp_tmp_extra(tmp);

        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
        void *tmp = ESP_SKB_CB(skb)->tmp;
        struct esp_output_extra *extra = esp_tmp_extra(tmp);

        esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
                                sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
                                             struct xfrm_state *x,
                                             struct ip_esp_hdr *esph,
                                             struct esp_output_extra *extra)
{
        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * encryption.
         */
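        /* While the AEAD runs, the wire layout in front of the payload
         * is | spi | seq_hi | seq_lo |, so all twelve AAD bytes are
         * contiguous.  The four bytes overwritten just before the
         * header are stashed in extra->seqhi and put back by
         * esp_output_restore_header() once encryption completes.
         */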
        if ((x->props.flags & XFRM_STATE_ESN)) {
                __u32 seqhi;
                struct xfrm_offload *xo = xfrm_offload(skb);

                if (xo)
                        seqhi = xo->seq.hi;
                else
                        seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

                extra->esphoff = (unsigned char *)esph -
                                 skb_transport_header(skb);
                esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
                extra->seqhi = esph->spi;
                esph->seq_no = htonl(seqhi);
        }

        esph->spi = x->id.spi;

        return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_output_restore_header(skb);
        esp_output_done(base, err);
}

static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
                                               int encap_type,
                                               struct esp_info *esp,
                                               __be16 sport,
                                               __be16 dport)
{
        struct udphdr *uh;
        __be32 *udpdata32;
        unsigned int len;

        len = skb->len + esp->tailen - skb_transport_offset(skb);
        if (len > U16_MAX)
                return ERR_PTR(-EMSGSIZE);

        uh = (struct udphdr *)esp->esph;
        uh->source = sport;
        uh->dest = dport;
        uh->len = htons(len);
        uh->check = 0;

        *skb_mac_header(skb) = IPPROTO_UDP;

        if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
                udpdata32 = (__be32 *)(uh + 1);
                udpdata32[0] = udpdata32[1] = 0;
                return (struct ip_esp_hdr *)(udpdata32 + 2);
        }

        return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                struct esp_info *esp)
{
        __be16 *lenp = (void *)esp->esph;
        struct ip_esp_hdr *esph;
        unsigned int len;
        struct sock *sk;

        len = skb->len + esp->tailen - skb_transport_offset(skb);
        if (len > IP_MAX_MTU)
                return ERR_PTR(-EMSGSIZE);

        rcu_read_lock();
        sk = esp6_find_tcp_sk(x);
        rcu_read_unlock();

        if (IS_ERR(sk))
                return ERR_CAST(sk);

        *lenp = htons(len);
        esph = (struct ip_esp_hdr *)(lenp + 1);

        return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                struct esp_info *esp)
{
        return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
                            struct esp_info *esp)
{
        struct xfrm_encap_tmpl *encap = x->encap;
        struct ip_esp_hdr *esph;
        __be16 sport, dport;
        int encap_type;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        encap_type = encap->encap_type;
        spin_unlock_bh(&x->lock);

        switch (encap_type) {
        default:
        case UDP_ENCAP_ESPINUDP:
        case UDP_ENCAP_ESPINUDP_NON_IKE:
                esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
                break;
        case TCP_ENCAP_ESPINTCP:
                esph = esp6_output_tcp_encap(x, skb, esp);
                break;
        }

        if (IS_ERR(esph))
                return PTR_ERR(esph);

        esp->esph = esph;

        return 0;
}

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *tail;
        u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;

        if (x->encap) {
                int err = esp6_output_encap(x, skb, esp);

                if (err < 0)
                        return err;
        }

        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);

                        goto skip_cow;
                } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
                           && !skb_has_frag_list(skb)) {
                        int allocsize;
                        struct sock *sk = skb->sk;
                        struct page_frag *pfrag = &x->xfrag;

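                        /* Fast path: carve the trailer out of the
                         * per-state page_frag pool and attach it as a
                         * new fragment instead of COWing the skb.
                         */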
                        esp->inplace = false;

                        allocsize = ALIGN(tailen, L1_CACHE_BYTES);

                        spin_lock_bh(&x->lock);

                        if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                                spin_unlock_bh(&x->lock);
                                goto cow;
                        }

                        page = pfrag->page;
                        get_page(page);

                        vaddr = kmap_atomic(page);

                        tail = vaddr + pfrag->offset;

                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

                        kunmap_atomic(vaddr);

                        nfrags = skb_shinfo(skb)->nr_frags;

                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
                                             tailen);
                        skb_shinfo(skb)->nr_frags = ++nfrags;

                        pfrag->offset = pfrag->offset + allocsize;

                        spin_unlock_bh(&x->lock);

                        nfrags++;

                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk && sk_fullsock(sk))
                                refcount_add(tailen, &sk->sk_wmem_alloc);

                        goto out;
                }
        }

cow:
        esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);
        esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
        pskb_put(skb, trailer, tailen);

out:
        return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *iv;
        int alen;
        void *tmp;
        int ivlen;
        int assoclen;
        int extralen;
        struct page *page;
        struct ip_esp_hdr *esph;
        struct aead_request *req;
        struct crypto_aead *aead;
        struct scatterlist *sg, *dsg;
        struct esp_output_extra *extra;
        int err = -ENOMEM;

        assoclen = sizeof(struct ip_esp_hdr);
        extralen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                extralen += sizeof(*extra);
                assoclen += sizeof(__be32);
        }

        aead = x->data;
        alen = crypto_aead_authsize(aead);
        ivlen = crypto_aead_ivsize(aead);

        tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
        if (!tmp)
                goto error;

        extra = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        if (esp->inplace)
                dsg = sg;
        else
                dsg = &sg[esp->nfrags];

        esph = esp_output_set_esn(skb, x, esp->esph, extra);
        esp->esph = esph;

        sg_init_table(sg, esp->nfrags);
        err = skb_to_sgvec(skb, sg,
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
                goto error_free;

        if (!esp->inplace) {
                int allocsize;
                struct page_frag *pfrag = &x->xfrag;

                allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
                        goto error_free;
                }

                skb_shinfo(skb)->nr_frags = 1;

                page = pfrag->page;
                get_page(page);
                /* replace page frags in skb with new page */
                __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
                pfrag->offset = pfrag->offset + allocsize;
                spin_unlock_bh(&x->lock);

                sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
                err = skb_to_sgvec(skb, dsg,
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
                        goto error_free;
        }

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_output_done, skb);

        aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
        aead_request_set_ad(req, assoclen);

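        /* Seed the IV with the 64-bit sequence number: zero the IV and
         * copy the big-endian seqno into its trailing bytes (at most
         * eight).  The geniv template (e.g. seqiv or echainiv) derives
         * the actual per-packet IV from this seed.
         */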
        memset(iv, 0, ivlen);
        memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
               min(ivlen, 8));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_encrypt(req);

        switch (err) {
        case -EINPROGRESS:
                goto error;

        case -ENOSPC:
                err = NET_XMIT_DROP;
                break;

        case 0:
                if ((x->props.flags & XFRM_STATE_ESN))
                        esp_output_restore_header(skb);
                esp_output_encap_csum(skb);
        }

        if (sg != dsg)
                esp_ssg_unref(x, tmp);

        if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                err = esp_output_tail_tcp(x, skb);

error_free:
        kfree(tmp);
error:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int alen;
        int blksize;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;

        esp.inplace = true;

        esp.proto = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;
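        /* Example: a 60-byte payload with AES-GCM (blocksize 1, so
         * blksize = 4) and no TFC padding gives clen = ALIGN(62, 4) = 64
         * and plen = 4: two pad bytes plus the pad-length and
         * next-header octets.
         */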

        esp.esph = ip_esp_hdr(skb);

        esp.nfrags = esp6_output_head(x, skb, &esp);
        if (esp.nfrags < 0)
                return esp.nfrags;

        esph = esp.esph;
        esph->spi = x->id.spi;

        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
        esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
                            ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        skb_push(skb, -skb_network_offset(skb));

        return esp6_output_tail(x, skb, &esp);
}

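/* Strip the ESP trailer ([ padding | pad length | next header ] followed
 * by the ICV) and return the next-header value, or a negative errno if
 * the padding is malformed.
 */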
static inline int esp_remove_trailer(struct sk_buff *skb)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int alen, hlen, elen;
        int padlen, trimlen;
        __wsum csumdiff;
        u8 nexthdr[2];
        int ret;

        alen = crypto_aead_authsize(aead);
        hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        elen = skb->len - hlen;

        if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
                ret = xo->proto;
                goto out;
        }

        ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
        BUG_ON(ret);

        ret = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        trimlen = alen + padlen + 2;
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
                skb->csum = csum_block_sub(skb->csum, csumdiff,
                                           skb->len - trimlen);
        }
        pskb_trim(skb, skb->len - trimlen);

        ret = nexthdr[1];

out:
        return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int hdr_len = skb_network_header_len(skb);

        if (!xo || !(xo->flags & CRYPTO_DONE))
                kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        err = esp_remove_trailer(skb);
        if (unlikely(err < 0))
                goto out;

        if (x->encap) {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int offset = skb_network_offset(skb) + sizeof(*ip6h);
                struct xfrm_encap_tmpl *encap = x->encap;
                u8 nexthdr = ip6h->nexthdr;
                __be16 frag_off, source;
                struct udphdr *uh;
                struct tcphdr *th;

                offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
                uh = (void *)(skb->data + offset);
                th = (void *)(skb->data + offset);
                hdr_len += offset;

                switch (x->encap->encap_type) {
                case TCP_ENCAP_ESPINTCP:
                        source = th->source;
                        break;
                case UDP_ENCAP_ESPINUDP:
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        source = uh->source;
                        break;
                default:
                        WARN_ON_ONCE(1);
                        err = -EINVAL;
                        goto out;
                }

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
                    source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
                        km_new_mapping(x, &ipaddr, source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb_postpull_rcsum(skb, skb_network_header(skb),
                           skb_network_header_len(skb));
        skb_pull_rcsum(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
        struct xfrm_state *x = xfrm_input_state(skb);

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                struct ip_esp_hdr *esph = skb_push(skb, 4);

                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
        }
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_input_restore_header(skb);
        esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

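        /* Avoid skb_cow_data() when possible: a private linear skb needs
         * one SG entry; a private nonlinear skb without a frag list
         * needs nr_frags + 1 (the extra entry covers the linear head).
         */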
        if (!skb_cloned(skb)) {
                if (!skb_is_nonlinear(skb)) {
                        nfrags = 1;

                        goto skip_cow;
                } else if (!skb_has_frag_list(skb)) {
                        nfrags = skb_shinfo(skb)->nr_frags;
                        nfrags++;

                        goto skip_cow;
                }
        }

        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0) {
                ret = -EINVAL;
                goto out;
        }

skip_cow:
        ret = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        esp_input_set_header(skb, seqhi);

        sg_init_table(sg, nfrags);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
                kfree(tmp);
                goto out;
        }

        skb->ip_summed = CHECKSUM_NONE;

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_input_done, skb);

        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        ret = esp6_input_done2(skb, ret);

out:
        return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return 0;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0,
                             sock_net_uid(net, NULL));
        else
                ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
        xfrm_state_put(x);

        return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;

        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;

        aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

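        /* e.g. hmac(sha256) with cbc(aes) and the usual echainiv geniv
         * becomes "echainiv(authenc(hmac(sha256),cbc(aes)))".
         */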
        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

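        /* Key blob layout expected by the authenc template:
         *   [ rtattr CRYPTO_AUTHENC_KEYA_PARAM: enckeylen ][ auth key ][ enc key ]
         */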
        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        x->data = NULL;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        default:
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        }

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        err = -EINVAL;
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
#ifdef CONFIG_INET6_ESPINTCP
                case TCP_ENCAP_ESPINTCP:
                        /* only the length field, TCP encap is done by
                         * the socket
                         */
                        x->props.header_len += 2;
                        break;
#endif
                }
        }

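        /* Worst-case trailer: up to align - 1 padding bytes, the
         * pad-length and next-header octets, then the ICV.
         */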
        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

static const struct xfrm_type esp6_type = {
        .description    = "ESP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
        .input          = esp6_input,
        .output         = esp6_output,
        .hdr_offset     = xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
        .handler        =       xfrm6_rcv,
        .input_handler  =       xfrm_input,
        .cb_handler     =       esp6_rcv_cb,
        .err_handler    =       esp6_err,
        .priority       =       0,
};

static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp6_fini(void)
{
        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);