linux/drivers/crypto/chelsio/chcr_ipsec.c
/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE     8

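/*
 * Background: MAX_IMM_TX_PKT_LEN bounds the frames that are copied into the
 * work request as immediate data rather than being described by a
 * scatter-gather list (see is_eth_imm() below). GCM_ESP_IV_SIZE is the
 * 8-byte explicit IV that RFC 4106 AES-GCM ESP carries at the front of the
 * encrypted payload; the CPL offsets built in chcr_crypto_wreq() are
 * expressed relative to it.
 */
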
static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);

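/*
 * Callbacks the xfrm stack uses for hardware IPsec offload: the add/delete/
 * free hooks manage the driver-side SA entry when a state is installed with
 * offload requested, and xdo_dev_offload_ok is consulted on the transmit
 * path to decide whether a given packet may use the offload.
 */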
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
        .xdo_dev_state_add      = chcr_xfrm_add_state,
        .xdo_dev_state_delete   = chcr_xfrm_del_state,
        .xdo_dev_state_free     = chcr_xfrm_free_state,
        .xdo_dev_offload_ok     = chcr_ipsec_offload_ok,
};

/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
        struct net_device *netdev = NULL;
        int i;

        for (i = 0; i < lld->nports; i++) {
                netdev = lld->ports[i];
                if (!netdev)
                        continue;
                netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
                netdev->hw_enc_features |= NETIF_F_HW_ESP;
                netdev->features |= NETIF_F_HW_ESP;
                rtnl_lock();
                netdev_change_features(netdev);
                rtnl_unlock();
        }
}

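/*
 * Map the negotiated ICV (GCM tag) length to the hardware HMAC-control
 * encoding. alg_icv_len is in bits, so authsize below is in bytes; the
 * ICV_8/ICV_12/ICV_16 cases select the 64-, 96- and 128-bit tag lengths
 * (CHCR_SCMD_HMAC_CTRL_DIV2, _IPSEC_96BIT and _NO_TRUNC respectively).
 * Any other tag length is rejected with -EINVAL.
 */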
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
                                         struct ipsec_sa_entry *sa_entry)
{
        int hmac_ctrl;
        int authsize = x->aead->alg_icv_len / 8;

        sa_entry->authsize = authsize;

        switch (authsize) {
        case ICV_8:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
                break;
        case ICV_12:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
                break;
        case ICV_16:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
                break;
        default:
                return -EINVAL;
        }
        return hmac_ctrl;
}

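/*
 * Parse an rfc4106(gcm(aes)) key blob and prepare the on-chip key context.
 * The xfrm AEAD key is the AES key with a 4-byte nonce salt appended, so the
 * salt is split off first. The context later written to the ring by
 * copy_key_cpltx_pktxt() is laid out roughly as:
 *
 *      struct _key_ctx header (which carries the salt)
 *      AES key, padded up to a 16-byte multiple
 *      GHASH subkey H = AES_K(0^128)
 *
 * Illustrative sizing: a 128-bit key gives enckey_len = 16 and
 * kctx_len = 16 (padded key) + AEAD_H_SIZE (hash subkey).
 */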
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
                                    struct ipsec_sa_entry *sa_entry)
{
        struct crypto_cipher *cipher;
        int keylen = (x->aead->alg_key_len + 7) / 8;
        unsigned char *key = x->aead->alg_key;
        int ck_size, key_ctx_size = 0;
        unsigned char ghash_h[AEAD_H_SIZE];
        int ret = 0;

        if (keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(sa_entry->salt, key + keylen, 4);
        }

        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else if (keylen == AES_KEYSIZE_192) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        } else if (keylen == AES_KEYSIZE_256) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        } else {
                pr_err("GCM: Invalid key length %d\n", keylen);
                ret = -EINVAL;
                goto out;
        }

        memcpy(sa_entry->key, key, keylen);
        sa_entry->enckey_len = keylen;
        key_ctx_size = sizeof(struct _key_ctx) +
                              ((DIV_ROUND_UP(keylen, 16)) << 4) +
                              AEAD_H_SIZE;

        sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0,
                                                 key_ctx_size >> 4);

        /* Calculate the H = CIPH(K, 0 repeated 16 times).
         * It will go in key context
         */
        cipher = crypto_alloc_cipher("aes-generic", 0, 0);
        if (IS_ERR(cipher)) {
                sa_entry->enckey_len = 0;
                ret = -ENOMEM;
                goto out;
        }

        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret) {
                sa_entry->enckey_len = 0;
                goto out1;
        }
        memset(ghash_h, 0, AEAD_H_SIZE);
        crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
        memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
               16), ghash_h, AEAD_H_SIZE);
        sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
                              AEAD_H_SIZE;
out1:
        crypto_free_cipher(cipher);
out:
        return ret;
}

/*
 * chcr_xfrm_add_state
 * returns 0 on success, a negative errno if the state cannot be offloaded
 * or the SA entry cannot be allocated
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;
        int res = 0;

        if (x->props.aalgo != SADB_AALG_NONE) {
                pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                pr_debug("CHCR: Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.flags & XFRM_STATE_ESN) {
                pr_debug("CHCR: Cannot offload ESN xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                pr_debug("CHCR: Only ESP xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                pr_debug("CHCR: Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128 &&
            x->aead->alg_icv_len != 96) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                res = -ENOMEM;
                goto out;
        }

        sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
        chcr_ipsec_setkey(x, sa_entry);
        x->xso.offload_handle = (unsigned long)sa_entry;
        try_module_get(THIS_MODULE);
out:
        return res;
}

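/*
 * Illustrative example (not part of the driver itself): a state that passes
 * the checks above could be installed from userspace with something like
 *
 *   ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x1001 \
 *      mode transport aead "rfc4106(gcm(aes))" 0x<aes-key-plus-4-byte-salt> 128 \
 *      offload dev ethX dir out
 *
 * i.e. RFC 4106 AES-GCM with a 96- or 128-bit ICV, no ESN and no UDP
 * encapsulation. Addresses, SPI and the device name here are placeholders.
 */
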
static void chcr_xfrm_del_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}

static void chcr_xfrm_free_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
        kfree(sa_entry);
        module_put(THIS_MODULE);
}

static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        /* Offload with IP options is not supported yet */
        if (ip_hdr(skb)->ihl > 5)
                return false;

        return true;
}

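/*
 * A "flit" is one 64-bit unit of a Tx descriptor. is_eth_imm() returns the
 * length of the WR/CPL headers (including the key context) when the whole
 * frame fits within MAX_IMM_TX_PKT_LEN and can therefore be sent as
 * immediate data, or 0 when a scatter-gather list is required.
 */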
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
{
        int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;

        hdrlen += sizeof(struct cpl_tx_pkt);
        if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
                return hdrlen;
        return 0;
}

static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
                                             unsigned int kctx_len)
{
        unsigned int flits;
        int hdrlen = is_eth_imm(skb, kctx_len);

        /* If the skb is small enough, we can pump it out as a work request
         * with only immediate data.  In that case we just have to have the
         * TX Packet header plus the skb data in the Work Request.
         */

        if (hdrlen)
                return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

        /* Otherwise, we're going to have to construct a Scatter gather list
         * of the skb body and fragments.  We also include the flits necessary
         * for the TX Packet Work Request and CPL.  We always have a firmware
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
         * with an embedded TX Packet Write CPL message.
         */
        flits += (sizeof(struct fw_ulptx_wr) +
                  sizeof(struct chcr_ipsec_req) +
                  kctx_len +
                  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
        return flits;
}

inline void *copy_cpltx_pktxt(struct sk_buff *skb,
                                struct net_device *dev,
                                void *pos)
{
        struct cpl_tx_pkt_core *cpl;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        u32 ctrl0, qidx;
        u64 cntrl = 0;
        int left;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        left = (void *)q->q.stat - pos;
        if (!left)
                pos = q->q.desc;

        cpl = (struct cpl_tx_pkt_core *)pos;

        cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
        ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
                               TXPKT_PF_V(adap->pf);
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
        }

        cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

        pos += sizeof(struct cpl_tx_pkt_core);
        return pos;
}

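/*
 * Copy the SA's key context (header with salt, padded AES key and GHASH
 * subkey) into the descriptor ring at *pos, wrapping back to the start of
 * the ring (q->q.desc) if the context would run past the end (q->q.stat),
 * then append the CPL_TX_PKT_XT header via copy_cpltx_pktxt().
 */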
inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
                                struct net_device *dev,
                                void *pos,
                                struct ipsec_sa_entry *sa_entry)
{
        struct _key_ctx *key_ctx;
        int left, eoq, key_len;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        unsigned int qidx;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
        key_len = sa_entry->kctx_len;

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        left = eoq;
        if (!eoq) {
                pos = q->q.desc;
                left = 64 * q->q.size;
        }

        /* Copy the Key context header */
        key_ctx = (struct _key_ctx *)pos;
        key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
        memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
        pos += sizeof(struct _key_ctx);
        left -= sizeof(struct _key_ctx);

        if (likely(key_len <= left)) {
                memcpy(key_ctx->key, sa_entry->key, key_len);
                pos += key_len;
        } else {
                memcpy(pos, sa_entry->key, left);
                memcpy(q->q.desc, sa_entry->key + left,
                       key_len - left);
                pos = (u8 *)q->q.desc + (key_len - left);
        }
        /* Copy CPL TX PKT XT */
        pos = copy_cpltx_pktxt(skb, dev, pos);

        return pos;
}

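/*
 * Build the crypto work request in the Tx descriptor ring. The WR laid out
 * here is, in order: the FW_ULPTX_WR header, a ULP_TXPKT command, a ULPTX
 * immediate-data sub-command, the CPL_TX_SEC_PDU describing the AAD, cipher
 * and ICV offsets for AES-GCM, the key context, a CPL_TX_PKT_XT header and
 * finally the packet itself (inline, or as an SGL written by the caller).
 */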
inline void *chcr_crypto_wreq(struct sk_buff *skb,
                               struct net_device *dev,
                               void *pos,
                               int credits,
                               struct ipsec_sa_entry *sa_entry)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        unsigned int immdatalen = 0;
        unsigned int ivsize = GCM_ESP_IV_SIZE;
        struct chcr_ipsec_wr *wr;
        unsigned int flits;
        u32 wr_mid;
        int qidx = skb_get_queue_mapping(skb);
        struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
        unsigned int kctx_len = sa_entry->kctx_len;
        int qid = q->q.cntxt_id;

        atomic_inc(&adap->chcr_stats.ipsec_cnt);

        flits = calc_tx_sec_flits(skb, kctx_len);

        if (is_eth_imm(skb, kctx_len))
                immdatalen = skb->len;

        /* WR Header */
        wr = (struct chcr_ipsec_wr *)pos;
        wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                netif_tx_stop_queue(q->txq);
                q->q.stops++;
                wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
        wr_mid |= FW_ULPTX_WR_DATA_F;
        wr->wreq.flowid_len16 = htonl(wr_mid);

        /* ULPTX */
        wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
        wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2)  - 1);

        /* Sub-command */
        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
        wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                         sizeof(wr->req.key_ctx) +
                                         kctx_len +
                                         sizeof(struct cpl_tx_pkt_core) +
                                         immdatalen);

        /* CPL_SEC_PDU */
        wr->req.sec_cpl.op_ivinsrtofst = htonl(
                                CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                                CPL_TX_SEC_PDU_CPLLEN_V(2) |
                                CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
                                CPL_TX_SEC_PDU_IVINSRTOFST_V(
                                (skb_transport_offset(skb) +
                                sizeof(struct ip_esp_hdr) + 1)));

        wr->req.sec_cpl.pldlen = htonl(skb->len);

        wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                (skb_transport_offset(skb) + 1),
                                (skb_transport_offset(skb) +
                                 sizeof(struct ip_esp_hdr)),
                                (skb_transport_offset(skb) +
                                 sizeof(struct ip_esp_hdr) +
                                 GCM_ESP_IV_SIZE + 1), 0);

        wr->req.sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
                                           sizeof(struct ip_esp_hdr) +
                                           GCM_ESP_IV_SIZE + 1,
                                           sa_entry->authsize,
                                           sa_entry->authsize);
        wr->req.sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
                                         CHCR_SCMD_AUTH_MODE_GHASH,
                                         sa_entry->hmac_ctrl,
                                         ivsize >> 1);
        wr->req.sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
                                                                  0, 0, 0);

        pos += sizeof(struct fw_ulptx_wr) +
               sizeof(struct ulp_txpkt) +
               sizeof(struct ulptx_idata) +
               sizeof(struct cpl_tx_sec_pdu);

        pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}

/**
 *      flits_to_desc - returns the num of Tx descriptors for the given flits
 *      @n: the number of flits
 *
 *      Returns the number of Tx descriptors needed for the supplied number
 *      of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
        WARN_ON(n > SGE_MAX_WR_LEN / 8);
        return DIV_ROUND_UP(n, 8);
}

static inline unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
        netif_tx_stop_queue(q->txq);
        q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}

/*
 *      chcr_ipsec_xmit - called from the ULD Tx handler for ESP packets that
 *      carry an offloaded xfrm state. Reserves descriptor ring space, builds
 *      the crypto work request via chcr_crypto_wreq(), inlines or DMA-maps
 *      the packet, and rings the doorbell.
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ipsec_sa_entry *sa_entry;
        u64 *pos, *end, *before, *sgl;
        int qidx, left, credits;
        unsigned int flits = 0, ndesc, kctx_len;
        struct adapter *adap;
        struct sge_eth_txq *q;
        struct port_info *pi;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
        bool immediate = false;

        if (!x->xso.offload_handle)
                return NETDEV_TX_BUSY;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
        kctx_len = sa_entry->kctx_len;

        if (skb->sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        cxgb4_reclaim_completed_tx(adap, &q->q, true);

        flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&q->q) - ndesc;

        if (unlikely(credits < 0)) {
                eth_txq_stop(q);
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
                        dev->name, qidx, credits, ndesc, txq_avail(&q->q),
                        flits);
                return NETDEV_TX_BUSY;
        }

        if (is_eth_imm(skb, kctx_len))
                immediate = true;

        if (!immediate &&
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
                q->mapping_err++;
                goto out_free;
        }

        pos = (u64 *)&q->q.desc[q->q.pidx];
        before = (u64 *)pos;
        end = (u64 *)pos + flits;
        /* Setup IPSec CPL */
        pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
                                       credits, sa_entry);
        if (before > (u64 *)pos) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
        }
        if (pos == (u64 *)q->q.stat) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
                pos = (void *)q->q.desc;
        }

        sgl = (void *)pos;
        if (immediate) {
                cxgb4_inline_tx_skb(skb, &q->q, sgl);
                dev_consume_skb_any(skb);
        } else {
                int last_desc;

                cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
                                0, addr);
                skb_orphan(skb);

                last_desc = q->q.pidx + ndesc - 1;
                if (last_desc >= q->q.size)
                        last_desc -= q->q.size;
                q->q.sdesc[last_desc].skb = skb;
                q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
        }
        txq_advance(&q->q, ndesc);

        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        return NETDEV_TX_OK;
}