linux/drivers/crypto/chelsio/chcr_ktls.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Chelsio Communications.  All rights reserved. */

#ifdef CONFIG_CHELSIO_TLS_DEVICE
#include <linux/highmem.h>
#include "chcr_ktls.h"
#include "clip_tbl.h"

static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
/*
 * chcr_ktls_save_keys: calculate and save crypto keys.
 * @tx_info - driver specific tls info.
 * @crypto_info - tls crypto information.
 * @direction - TX/RX direction.
 * return - SUCCESS/FAILURE.
 */
static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
                               struct tls_crypto_info *crypto_info,
                               enum tls_offload_ctx_dir direction)
{
        int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
        unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
        struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
        struct ktls_key_ctx *kctx = &tx_info->key_ctx;
        struct crypto_cipher *cipher;
        unsigned char *key, *salt;

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                info_128_gcm =
                        (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
                keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
                tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
                mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
                tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
                tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);

                ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
                key = info_128_gcm->key;
                salt = info_128_gcm->salt;
                tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
                /* The SCMD fields used when encrypting a full TLS
                 * record. This is a one-time calculation that holds for
                 * the lifetime of the connection.
                 */
                tx_info->scmd0_seqno_numivs =
                        SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
                        SCMD_CIPH_AUTH_SEQ_CTRL_F |
                        SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
                        SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
                        SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
                        SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
                        SCMD_NUM_IVS_V(1);

                /* keys will be sent inline. */
                tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;

                /* The SCMD fields used when encrypting a partial TLS
                 * record (no trailer and possibly a truncated payload).
                 */
                tx_info->scmd0_short_seqno_numivs =
                        SCMD_CIPH_AUTH_SEQ_CTRL_F |
                        SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
                        SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
                        SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);

                tx_info->scmd0_short_ivgen_hdrlen =
                        tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;

                break;

        default:
                pr_err("GCM: cipher type 0x%x not supported\n",
                       crypto_info->cipher_type);
                ret = -EINVAL;
                goto out;
        }

        key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
                       roundup(keylen, 16) + ghash_size;
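        /* Illustrative layout (our reading of the format, not from the
         * hardware spec): for AES-GCM-128 the inline key context built
         * below is
         *
         *   [ctx_hdr][salt][key (rounded up to 16B)][GHASH H (16B)]
         *
         * so with a 16 byte key, key_ctx_size works out to
         * CHCR_KTLS_KEY_CTX_LEN + 16 + 16 bytes, and the >> 4 further
         * down expresses it in 16 byte units for FILL_KEY_CTX_HDR().
         */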
        /* Calculate the H = CIPH(K, 0 repeated 16 times).
         * It will go in the key context.
         */
        cipher = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(cipher)) {
                ret = -ENOMEM;
                goto out;
        }

        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
                goto out1;

        memset(ghash_h, 0, ghash_size);
        crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
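        /* ghash_h now holds H = AES-ECB(key, 0^16), the GHASH hash
         * subkey defined by GCM; the hardware presumably needs it
         * precomputed in the key context rather than deriving it on
         * the fly.
         */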

        /* fill the Key context */
        if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
                kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 mac_key_size,
                                                 key_ctx_size >> 4);
        } else {
                ret = -EINVAL;
                goto out1;
        }

        memcpy(kctx->salt, salt, tx_info->salt_size);
        memcpy(kctx->key, key, keylen);
        memcpy(kctx->key + keylen, ghash_h, ghash_size);
        tx_info->key_ctx_len = key_ctx_size;

out1:
        crypto_free_cipher(cipher);
out:
        return ret;
}

static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
                                             int new_state)
{
        /* This function can be called from both rx (interrupt context) and tx
         * queue contexts.
         */
        spin_lock_bh(&tx_info->lock);
        switch (tx_info->connection_state) {
        case KTLS_CONN_CLOSED:
                tx_info->connection_state = new_state;
                break;

        case KTLS_CONN_ACT_OPEN_REQ:
                /* only go forward if state is greater than current state. */
                if (new_state <= tx_info->connection_state)
                        break;
                /* update to the next state and also initialize TCB */
                tx_info->connection_state = new_state;
                fallthrough;
        case KTLS_CONN_ACT_OPEN_RPL:
                /* If we are stuck in this state, the TCB init might not
                 * have been received by the HW; try sending it again.
                 */
                if (!chcr_init_tcb_fields(tx_info))
                        tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
                break;

        case KTLS_CONN_SET_TCB_REQ:
                /* only go forward if state is greater than current state. */
                if (new_state <= tx_info->connection_state)
                        break;
                /* update to the next state and check if l2t_state is valid */
                tx_info->connection_state = new_state;
                fallthrough;
        case KTLS_CONN_SET_TCB_RPL:
                /* Check if l2t state is valid, then move to ready state. */
                if (cxgb4_check_l2t_valid(tx_info->l2te)) {
                        tx_info->connection_state = KTLS_CONN_TX_READY;
                        atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
                }
                break;

        case KTLS_CONN_TX_READY:
                /* nothing to be done here */
                break;

        default:
                pr_err("unknown KTLS connection state\n");
                break;
        }
        spin_unlock_bh(&tx_info->lock);

        return tx_info->connection_state;
}
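
/* A sketch of the intended state progression (inferred from the
 * handlers above and below, not from Chelsio documentation):
 *
 *   KTLS_CONN_CLOSED -> ACT_OPEN_REQ (chcr_setup_connection sent the
 *   active open) -> ACT_OPEN_RPL (CPL_ACT_OPEN_RPL received, TCB init
 *   sent) -> SET_TCB_REQ (TCB init queued to HW) -> SET_TCB_RPL
 *   (CPL_SET_TCB_RPL received) -> TX_READY (l2t entry valid, transmit
 *   may begin).
 */
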
/*
 * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
 * @sk - tcp socket.
 * @tx_info - driver specific tls info.
 * @atid - connection active tid.
 * return - send success/failure.
 */
static int chcr_ktls_act_open_req(struct sock *sk,
                                  struct chcr_ktls_info *tx_info,
                                  int atid)
{
        struct inet_sock *inet = inet_sk(sk);
        struct cpl_t6_act_open_req *cpl6;
        struct cpl_act_open_req *cpl;
        struct sk_buff *skb;
        unsigned int len;
        int qid_atid;
        u64 options;

        len = sizeof(*cpl6);
        skb = alloc_skb(len, GFP_KERNEL);
        if (unlikely(!skb))
                return -ENOMEM;
        /* mark it a control pkt */
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);

        cpl6 = __skb_put_zero(skb, len);
        cpl = (struct cpl_act_open_req *)cpl6;
        INIT_TP_WR(cpl6, 0);
        qid_atid = TID_QID_V(tx_info->rx_qid) |
                   TID_TID_V(atid);
        OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
        cpl->local_port = inet->inet_sport;
        cpl->peer_port = inet->inet_dport;
        cpl->local_ip = inet->inet_rcv_saddr;
        cpl->peer_ip = inet->inet_daddr;

        /* fill first 64 bit option field. */
        options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
                  SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
        cpl->opt0 = cpu_to_be64(options);

        /* next 64 bit option field. */
        options =
                TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
        cpl->opt2 = htonl(options);

        return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
 * @sk - tcp socket.
 * @tx_info - driver specific tls info.
 * @atid - connection active tid.
 * return - send success/failure.
 */
static int chcr_ktls_act_open_req6(struct sock *sk,
                                   struct chcr_ktls_info *tx_info,
                                   int atid)
{
        struct inet_sock *inet = inet_sk(sk);
        struct cpl_t6_act_open_req6 *cpl6;
        struct cpl_act_open_req6 *cpl;
        struct sk_buff *skb;
        unsigned int len;
        int qid_atid;
        u64 options;

        len = sizeof(*cpl6);
        skb = alloc_skb(len, GFP_KERNEL);
        if (unlikely(!skb))
                return -ENOMEM;
        /* mark it a control pkt */
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);

        cpl6 = __skb_put_zero(skb, len);
        cpl = (struct cpl_act_open_req6 *)cpl6;
        INIT_TP_WR(cpl6, 0);
        qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
        OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
        cpl->local_port = inet->inet_sport;
        cpl->peer_port = inet->inet_dport;
        cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
        cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
        cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
        cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];

        /* first 64 bit option field. */
        options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
                  SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
        cpl->opt0 = cpu_to_be64(options);
        /* next 64 bit option field. */
        options =
                TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
        cpl->opt2 = htonl(options);

        return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
}
#endif /* #if IS_ENABLED(CONFIG_IPV6) */

/*
 * chcr_setup_connection:  create a TCB entry so that TP will form tcp packets.
 * @sk - tcp socket.
 * @tx_info - driver specific tls info.
 * return: NET_TX_OK/NET_XMIT_DROP
 */
static int chcr_setup_connection(struct sock *sk,
                                 struct chcr_ktls_info *tx_info)
{
        struct tid_info *t = &tx_info->adap->tids;
        int atid, ret = 0;

        atid = cxgb4_alloc_atid(t, tx_info);
        if (atid == -1)
                return -EINVAL;

        tx_info->atid = atid;
        tx_info->ip_family = sk->sk_family;

        if (sk->sk_family == AF_INET) {
                tx_info->ip_family = AF_INET;
                ret = chcr_ktls_act_open_req(sk, tx_info, atid);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                if (!sk->sk_ipv6only &&
                    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
                        tx_info->ip_family = AF_INET;
                        ret = chcr_ktls_act_open_req(sk, tx_info, atid);
                } else {
                        tx_info->ip_family = AF_INET6;
                        ret = cxgb4_clip_get(tx_info->netdev,
                                             (const u32 *)
                                             &sk->sk_v6_rcv_saddr.s6_addr,
                                             1);
                        if (ret)
                                goto out;
                        ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
                }
#endif
        }

        /* If the return value is NET_XMIT_CN, the message will still be
         * sent, just delayed, so treat it as success; for any other
         * nonzero value, free the atid and return that failure.
         */
        if (ret) {
                if (ret == NET_XMIT_CN)
                        ret = 0;
                else
                        cxgb4_free_atid(t, atid);
                goto out;
        }

        /* update the connection state */
        chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
out:
        return ret;
}

/*
 * chcr_set_tcb_field: update tcb fields.
 * @tx_info - driver specific tls info.
 * @word - TCB word.
 * @mask - TCB word related mask.
 * @val - TCB word related value.
 * @no_reply - set 1 if not looking for TP response.
 */
static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
                              u64 mask, u64 val, int no_reply)
{
        struct cpl_set_tcb_field *req;
        struct sk_buff *skb;

        skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
        INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
        req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
                                NO_REPLY_V(no_reply));
        req->word_cookie = htons(TCB_WORD_V(word));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);

        set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
        return cxgb4_ofld_send(tx_info->netdev, skb);
}

/*
 * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
 * @tx_info - driver specific tls info.
 * return: NET_TX_OK/NET_XMIT_DROP.
 */
static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
{
        return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
                                  TCB_T_STATE_V(TCB_T_STATE_M),
                                  CHCR_TCB_STATE_CLOSED, 1);
}

/*
 * chcr_ktls_dev_del:  call back for tls_dev_del.
 * Removes the tid and l2t entry and closes the connection; called on a
 * per-connection basis.
 * @netdev - net device.
 * @tls_ctx - tls context.
 * @direction - TX/RX crypto direction
 */
void chcr_ktls_dev_del(struct net_device *netdev,
                       struct tls_context *tls_ctx,
                       enum tls_offload_ctx_dir direction)
{
        struct chcr_ktls_ofld_ctx_tx *tx_ctx =
                                chcr_get_ktls_tx_context(tls_ctx);
        struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
        struct sock *sk;

        if (!tx_info)
                return;
        sk = tx_info->sk;

        spin_lock(&tx_info->lock);
        tx_info->connection_state = KTLS_CONN_CLOSED;
        spin_unlock(&tx_info->lock);

        /* clear l2t entry */
        if (tx_info->l2te)
                cxgb4_l2t_release(tx_info->l2te);

#if IS_ENABLED(CONFIG_IPV6)
        /* Clear the clip entry. This must release the same address that
         * was acquired in chcr_ktls_dev_add(): the local sk_v6_rcv_saddr,
         * not the peer address.
         */
        if (tx_info->ip_family == AF_INET6)
                cxgb4_clip_release(netdev,
                                   (const u32 *)&sk->sk_v6_rcv_saddr,
                                   1);
#endif

        /* clear tid */
        if (tx_info->tid != -1) {
                /* clear tcb state and then release tid */
                chcr_ktls_mark_tcb_close(tx_info);
                cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                 tx_info->tid, tx_info->ip_family);
        }

        atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
        kvfree(tx_info);
        tx_ctx->chcr_info = NULL;
        /* release module refcount */
        module_put(THIS_MODULE);
}

/*
 * chcr_ktls_dev_add:  call back for tls_dev_add.
 * Create a tcb entry for TP. Also add an l2t entry for the connection,
 * generate the crypto keys and save them locally.
 * @netdev - net device.
 * @sk - tcp socket.
 * @direction - TX/RX crypto direction
 * return: SUCCESS/FAILURE.
 */
int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
                      enum tls_offload_ctx_dir direction,
                      struct tls_crypto_info *crypto_info,
                      u32 start_offload_tcp_sn)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct chcr_ktls_ofld_ctx_tx *tx_ctx;
        struct chcr_ktls_info *tx_info;
        struct dst_entry *dst;
        struct adapter *adap;
        struct port_info *pi;
        struct neighbour *n;
        u8 daaddr[16];
        int ret = -1;

        tx_ctx = chcr_get_ktls_tx_context(tls_ctx);

        pi = netdev_priv(netdev);
        adap = pi->adapter;
        if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
                pr_err("not expecting for RX direction\n");
                ret = -EINVAL;
                goto out;
        }
        if (tx_ctx->chcr_info) {
                ret = -EINVAL;
                goto out;
        }

        tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
        if (!tx_info) {
                ret = -ENOMEM;
                goto out;
        }

        spin_lock_init(&tx_info->lock);

        /* clear connection state */
        spin_lock(&tx_info->lock);
        tx_info->connection_state = KTLS_CONN_CLOSED;
        spin_unlock(&tx_info->lock);

        tx_info->sk = sk;
        /* initialize tid and atid to -1; 0 is also a valid id. */
        tx_info->tid = -1;
        tx_info->atid = -1;

        tx_info->adap = adap;
        tx_info->netdev = netdev;
        tx_info->first_qset = pi->first_qset;
        tx_info->tx_chan = pi->tx_chan;
        tx_info->smt_idx = pi->smt_idx;
        tx_info->port_id = pi->port_id;

        tx_info->rx_qid = chcr_get_first_rx_qid(adap);
        if (unlikely(tx_info->rx_qid < 0))
                goto out2;

        tx_info->prev_seq = start_offload_tcp_sn;
        tx_info->tcp_start_seq_number = start_offload_tcp_sn;

        /* save crypto keys */
        ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
        if (ret < 0)
                goto out2;

        /* get peer ip */
        if (sk->sk_family == AF_INET) {
                memcpy(daaddr, &sk->sk_daddr, 4);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                if (!sk->sk_ipv6only &&
                    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
                        memcpy(daaddr, &sk->sk_daddr, 4);
                else
                        memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
#endif
        }

        /* get the l2t index */
        dst = sk_dst_get(sk);
        if (!dst) {
                pr_err("DST entry not found\n");
                goto out2;
        }
        n = dst_neigh_lookup(dst, daaddr);
        if (!n || !n->dev) {
                pr_err("neighbour not found\n");
                dst_release(dst);
                goto out2;
        }
        tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);

        neigh_release(n);
        dst_release(dst);

        if (!tx_info->l2te) {
                pr_err("l2t entry not found\n");
                goto out2;
        }

        tx_ctx->chcr_info = tx_info;

        /* create a filter and call cxgb4_l2t_send to send the packet out, which
         * will take care of updating l2t entry in hw if not already done.
         */
        ret = chcr_setup_connection(sk, tx_info);
        if (ret)
                goto out2;

        /* The module must not be unloaded while any connection exists. */
        if (!try_module_get(THIS_MODULE)) {
                ret = -EINVAL;
                goto out2;
        }
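        /* This reference is dropped by the module_put() in
         * chcr_ktls_dev_del() when the connection goes away.
         */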

        atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
        return 0;
out2:
        kvfree(tx_info);
out:
        atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
        return ret;
}

/*
 * chcr_init_tcb_fields:  Initialize tcb fields for TCP sequence number
 *                        handling.
 * @tx_info - driver specific tls info.
 * return: NET_TX_OK/NET_XMIT_DROP
 */
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
{
        int ret = 0;

        /* set tcb in offload and bypass */
        ret = chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
                                 TCB_T_FLAGS_V(TF_CORE_BYPASS_F |
                                               TF_NON_OFFLOAD_F),
                                 TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
        if (ret)
                return ret;
        /* reset snd_una and snd_next fields in tcb */
        ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
                                 TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
                                 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
                                 0, 1);
        if (ret)
                return ret;

        /* reset send max */
        ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
                                 TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
                                 0, 1);
        if (ret)
                return ret;

        /* update the l2t index and request a tp reply to confirm the tcb
         * is initialised to handle tx traffic.
         */
        ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
                                 TCB_L2T_IX_V(TCB_L2T_IX_M),
                                 TCB_L2T_IX_V(tx_info->l2te->idx), 0);
        return ret;
}

/*
 * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
 */
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
{
        const struct cpl_act_open_rpl *p = (void *)input;
        struct chcr_ktls_info *tx_info = NULL;
        unsigned int atid, tid, status;
        struct tid_info *t;

        tid = GET_TID(p);
        status = AOPEN_STATUS_G(ntohl(p->atid_status));
        atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));

        t = &adap->tids;
        tx_info = lookup_atid(t, atid);

        if (!tx_info || tx_info->atid != atid) {
                pr_err("tx_info or atid is not correct\n");
                return -1;
        }

        if (!status) {
                tx_info->tid = tid;
                cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);

                cxgb4_free_atid(t, atid);
                tx_info->atid = -1;
                /* update the connection state */
                chcr_ktls_update_connection_state(tx_info,
                                                  KTLS_CONN_ACT_OPEN_RPL);
        } else {
                /* the active open failed: free the atid so it is not
                 * leaked
                 */
                cxgb4_free_atid(t, atid);
                tx_info->atid = -1;
        }
        return 0;
}

/*
 * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
 */
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
{
        const struct cpl_set_tcb_rpl *p = (void *)input;
        struct chcr_ktls_info *tx_info = NULL;
        struct tid_info *t;
        u32 tid;

        tid = GET_TID(p);

        t = &adap->tids;
        tx_info = lookup_tid(t, tid);
        if (!tx_info || tx_info->tid != tid) {
                pr_err("tx_info or tid is not correct\n");
                return -1;
        }
        /* update the connection state */
        chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
        return 0;
}

static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
                                          u32 tid, void *pos, u16 word,
                                          u64 mask, u64 val, u32 reply)
{
        struct cpl_set_tcb_field_core *cpl;
        struct ulptx_idata *idata;
        struct ulp_txpkt *txpkt;

        /* ULP_TXPKT */
        txpkt = pos;
        txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
        txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));

        /* ULPTX_IDATA sub-command */
        idata = (struct ulptx_idata *)(txpkt + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
        idata->len = htonl(sizeof(*cpl));
        pos = idata + 1;

        cpl = pos;
        /* CPL_SET_TCB_FIELD */
        OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
                                NO_REPLY_V(!reply));
        cpl->word_cookie = htons(TCB_WORD_V(word));
        cpl->mask = cpu_to_be64(mask);
        cpl->val = cpu_to_be64(val);

        /* ULPTX_NOOP */
        idata = (struct ulptx_idata *)(cpl + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
        idata->len = htonl(0);
        pos = idata + 1;

        return pos;
}
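
/* The command written above is framed, in order, as:
 *
 *   ULP_TXPKT | ULPTX_IDATA | CPL_SET_TCB_FIELD_CORE | ULPTX_NOOP
 *
 * which together should account for CHCR_SET_TCB_FIELD_LEN bytes (the
 * trailing NOOP pads the command out). This is our reading of the
 * layout; the exact structure sizes live in the t4 message headers.
 */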

/*
 * chcr_write_cpl_set_tcb_ulp: update tcb values.
 * TCB is responsible for creating tcp headers, so all the related values
 * should be correctly updated.
 * @tx_info - driver specific tls info.
 * @q - tx queue on which packet is going out.
 * @tid - TCB identifier.
 * @pos - current position where we should start writing.
 * @word - TCB word.
 * @mask - TCB word related mask.
 * @val - TCB word related value.
 * @reply - set 1 if looking for TP response.
 * return - next position to write.
 */
static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
                                        struct sge_eth_txq *q, u32 tid,
                                        void *pos, u16 word, u64 mask,
                                        u64 val, u32 reply)
{
        int left = (void *)q->q.stat - pos;

        if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
                if (!left) {
                        pos = q->q.desc;
                } else {
                        u8 buf[48] = {0};

                        __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
                                                     mask, val, reply);

                        return chcr_copy_to_txd(buf, &q->q, pos,
                                                CHCR_SET_TCB_FIELD_LEN);
                }
        }

        pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
                                           mask, val, reply);

        /* check again if we are at the end of the queue */
        if (left == CHCR_SET_TCB_FIELD_LEN)
                pos = q->q.desc;

        return pos;
}

/*
 * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
 * with updated values like tcp seq, ack, window etc.
 * @tx_info - driver specific tls info.
 * @q - TX queue.
 * @tcp_seq
 * @tcp_ack
 * @tcp_win
 * return: NETDEV_TX_BUSY/NET_TX_OK.
 */
static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
                                   struct sge_eth_txq *q, u64 tcp_seq,
                                   u64 tcp_ack, u64 tcp_win)
{
        bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
        u32 len, cpl = 0, ndesc, wr_len;
        struct fw_ulptx_wr *wr;
        int credits;
        void *pos;

        wr_len = sizeof(*wr);
        /* there can be max 4 cpls, check if we have enough credits */
        len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
        ndesc = DIV_ROUND_UP(len, 64);
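        /* Worked example of the sizing (values assumed for
         * illustration): a 64B descriptor holds 8 flits; if
         * CHCR_SET_TCB_FIELD_LEN rounds up to 48B, then
         * len = 16 + 4 * 48 = 208B and ndesc = DIV_ROUND_UP(208, 64) = 4
         * descriptors reserved for the worst case of all four CPLs.
         */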

        credits = chcr_txq_avail(&q->q) - ndesc;
        if (unlikely(credits < 0)) {
                chcr_eth_txq_stop(q);
                return NETDEV_TX_BUSY;
        }

        pos = &q->q.desc[q->q.pidx];
        /* make space for WR, we'll fill it later when we know all the cpls
         * being sent out and have the complete length.
         */
        wr = pos;
        pos += wr_len;
        /* update tx_max if it's a re-transmit or the first wr */
        if (first_wr || tcp_seq != tx_info->prev_seq) {
                pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
                                                 TCB_TX_MAX_W,
                                                 TCB_TX_MAX_V(TCB_TX_MAX_M),
                                                 TCB_TX_MAX_V(tcp_seq), 0);
                cpl++;
        }
        /* reset snd una if it's a re-transmit pkt */
        if (tcp_seq != tx_info->prev_seq) {
                /* reset snd_una */
                pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
                                                 TCB_SND_UNA_RAW_W,
                                                 TCB_SND_UNA_RAW_V
                                                 (TCB_SND_UNA_RAW_M),
                                                 TCB_SND_UNA_RAW_V(0), 0);
                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
                cpl++;
        }
        /* update ack */
        if (first_wr || tx_info->prev_ack != tcp_ack) {
                pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
                                                 TCB_RCV_NXT_W,
                                                 TCB_RCV_NXT_V(TCB_RCV_NXT_M),
                                                 TCB_RCV_NXT_V(tcp_ack), 0);
                tx_info->prev_ack = tcp_ack;
                cpl++;
        }
        /* update receive window */
        if (first_wr || tx_info->prev_win != tcp_win) {
                pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
                                                 TCB_RCV_WND_W,
                                                 TCB_RCV_WND_V(TCB_RCV_WND_M),
                                                 TCB_RCV_WND_V(tcp_win), 0);
                tx_info->prev_win = tcp_win;
                cpl++;
        }

        if (cpl) {
                /* get the actual length */
                len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
                /* ULPTX wr */
                wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
                wr->cookie = 0;
                /* fill len in wr field */
                wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));

                ndesc = DIV_ROUND_UP(len, 64);
                chcr_txq_advance(&q->q, ndesc);
                cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
        }
        return 0;
}

/*
 * chcr_ktls_skb_copy
 * @nskb - new skb to which the frags will be added.
 * @skb - old skb from which the frags will be copied.
 */
static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
                __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
        }

        skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
        nskb->len += skb->data_len;
        nskb->data_len = skb->data_len;
        nskb->truesize += skb->data_len;
}

/*
 * chcr_ktls_get_tx_flits
 * returns number of flits to be sent out, it includes key context length, WR
 * size and skb fragments.
 */
static unsigned int
chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
{
        return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
               DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
}
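
/* Rough flit arithmetic (illustrative assumptions, not values from the
 * headers): a flit is 8 bytes, so if key_ctx_len were 40B and
 * CHCR_KTLS_WR_SIZE 80B, the fixed part would be
 * DIV_ROUND_UP(120, 8) = 15 flits, plus chcr_sgl_len() flits for the
 * SGL covering the skb frags.
 */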

/*
 * chcr_ktls_check_tcp_options: check whether the TCP header carries any
 * option other than NOP/EOL (e.g. timestamp); such options cannot be
 * generated by TP and must be sent out separately.
 * @tcp - tcp header of the packet.
 * return: 1 / 0
 */
static int
chcr_ktls_check_tcp_options(struct tcphdr *tcp)
{
        int cnt, opt, optlen;
        u_char *cp;

        cp = (u_char *)(tcp + 1);
        cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
        for (; cnt > 0; cnt -= optlen, cp += optlen) {
                opt = cp[0];
                if (opt == TCPOPT_EOL)
                        break;
                if (opt == TCPOPT_NOP) {
                        optlen = 1;
                } else {
                        if (cnt < 2)
                                break;
                        optlen = cp[1];
                        if (optlen < 2 || optlen > cnt)
                                break;
                }
                switch (opt) {
                case TCPOPT_NOP:
                        break;
                default:
                        return 1;
                }
        }
        return 0;
}

/*
 * chcr_ktls_write_tcp_options : TP can't send out all the options, so we
 * need to send them out separately.
 * @tx_info - driver specific tls info.
 * @skb - skb which contains the partial record.
 * @q - TX queue.
 * @tx_chan - channel number.
 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
 */
static int
chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
                            struct sge_eth_txq *q, uint32_t tx_chan)
{
        struct fw_eth_tx_pkt_wr *wr;
        struct cpl_tx_pkt_core *cpl;
        u32 ctrl, iplen, maclen;
#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6hdr *ip6;
#endif
        unsigned int ndesc;
        struct tcphdr *tcp;
        int len16, pktlen;
        struct iphdr *ip;
        int credits;
        u8 buf[150];
        void *pos;

        iplen = skb_network_header_len(skb);
        maclen = skb_mac_header_len(skb);

        /* packet length = eth hdr len + ip hdr len + tcp hdr len
         * (including options).
         */
        pktlen = skb->len - skb->data_len;

        ctrl = sizeof(*cpl) + pktlen;
        len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
        /* check how many descriptors needed */
        ndesc = DIV_ROUND_UP(len16, 4);

        credits = chcr_txq_avail(&q->q) - ndesc;
        if (unlikely(credits < 0)) {
                chcr_eth_txq_stop(q);
                return NETDEV_TX_BUSY;
        }

        pos = &q->q.desc[q->q.pidx];
        wr = pos;

        /* Firmware work request header */
        wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
                               FW_WR_IMMDLEN_V(ctrl));

        wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
        wr->r3 = 0;

        cpl = (void *)(wr + 1);

        /* CPL header */
        cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
                           TXPKT_PF_V(tx_info->adap->pf));
        cpl->pack = 0;
        cpl->len = htons(pktlen);
        /* checksum offload */
        cpl->ctrl1 = 0;

        pos = cpl + 1;

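        /* Bounds reasoning (ours): the headers copied below must fit in
         * buf[150]; with an ethernet header (14B, or 18B with VLAN), a
         * maximal 60B IP header and a maximal 60B TCP header, pktlen
         * tops out at 138 bytes, so the fixed buffer holds.
         */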
        memcpy(buf, skb->data, pktlen);
        if (tx_info->ip_family == AF_INET) {
                /* we need to correct the ip header len */
                ip = (struct iphdr *)(buf + maclen);
                ip->tot_len = htons(pktlen - maclen);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                ip6 = (struct ipv6hdr *)(buf + maclen);
                ip6->payload_len = htons(pktlen - maclen - iplen);
#endif
        }
        /* Now take care of the tcp header. If fin is not set, clear the
         * push bit as well; if fin is set, it will be sent last, so we
         * need to update the tcp sequence number to match the last
         * packet.
         */
        tcp = (struct tcphdr *)(buf + maclen + iplen);

        if (!tcp->fin)
                tcp->psh = 0;
        else
                tcp->seq = htonl(tx_info->prev_seq);

        chcr_copy_to_txd(buf, &q->q, pos, pktlen);

        chcr_txq_advance(&q->q, ndesc);
        cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
        return 0;
}

/* chcr_ktls_skb_shift - shifts up to the requested length of paged data
 * from one skb to another.
 * @tgt- buffer into which tail data gets added
 * @skb- buffer from which the paged data comes from
 * @shiftlen- shift up to this many bytes
 */
static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
                               int shiftlen)
{
        skb_frag_t *fragfrom, *fragto;
        int from, to, todo;

        WARN_ON(shiftlen > skb->data_len);

        todo = shiftlen;
        from = 0;
        to = 0;
        fragfrom = &skb_shinfo(skb)->frags[from];

        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
                fragfrom = &skb_shinfo(skb)->frags[from];
                fragto = &skb_shinfo(tgt)->frags[to];

                if (todo >= skb_frag_size(fragfrom)) {
                        *fragto = *fragfrom;
                        todo -= skb_frag_size(fragfrom);
                        from++;
                        to++;

                } else {
                        __skb_frag_ref(fragfrom);
                        skb_frag_page_copy(fragto, fragfrom);
                        skb_frag_off_copy(fragto, fragfrom);
                        skb_frag_size_set(fragto, todo);

                        skb_frag_off_add(fragfrom, todo);
                        skb_frag_size_sub(fragfrom, todo);
                        todo = 0;

                        to++;
                        break;
                }
        }

        /* Ready to "commit" this state change to tgt */
        skb_shinfo(tgt)->nr_frags = to;

        /* Reposition in the original skb */
        to = 0;
        while (from < skb_shinfo(skb)->nr_frags)
                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];

        skb_shinfo(skb)->nr_frags = to;

        WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

        skb->len -= shiftlen;
        skb->data_len -= shiftlen;
        skb->truesize -= shiftlen;
        tgt->len += shiftlen;
        tgt->data_len += shiftlen;
        tgt->truesize += shiftlen;

        return shiftlen;
}
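
/* This is essentially a trimmed-down variant of skb_shift() from
 * net/core/skbuff.c that only deals with paged frags (no linear data,
 * no frag coalescing), which is all the kTLS TX path needs here.
 */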

/*
 * chcr_ktls_xmit_wr_complete: sends out a complete record. If the skb
 * received holds the (possibly partial) end of a record, send out the
 * complete record so that the crypto block will be able to generate the
 * TAG/HASH.
 * @skb - segment which has the complete or partial end part.
 * @tx_info - driver specific tls info.
 * @q - TX queue.
 * @tcp_seq
 * @tcp_push - tcp push bit.
 * @mss - segment size.
 * return: NETDEV_TX_BUSY/NET_TX_OK.
 */
static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
                                      struct chcr_ktls_info *tx_info,
                                      struct sge_eth_txq *q, u32 tcp_seq,
                                      bool tcp_push, u32 mss)
{
        u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
        struct adapter *adap = tx_info->adap;
        int credits, left, last_desc;
        struct tx_sw_desc *sgl_sdesc;
        struct cpl_tx_data *tx_data;
        struct cpl_tx_sec_pdu *cpl;
        struct ulptx_idata *idata;
        struct ulp_txpkt *ulptx;
        struct fw_ulptx_wr *wr;
        void *pos;
        u64 *end;

        /* get the number of flits required */
        flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
        /* number of descriptors */
        ndesc = chcr_flits_to_desc(flits);
        /* check if enough credits available */
        credits = chcr_txq_avail(&q->q) - ndesc;
        if (unlikely(credits < 0)) {
                chcr_eth_txq_stop(q);
                return NETDEV_TX_BUSY;
        }

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                /* Credits are below the threshold values, stop the queue
                 * after injecting the Work Request for this packet.
                 */
                chcr_eth_txq_stop(q);
                wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }

        last_desc = q->q.pidx + ndesc - 1;
        if (last_desc >= q->q.size)
                last_desc -= q->q.size;
        sgl_sdesc = &q->q.sdesc[last_desc];

        if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
                return NETDEV_TX_BUSY;
        }

        pos = &q->q.desc[q->q.pidx];
        end = (u64 *)pos + flits;
        /* FW_ULPTX_WR */
        wr = pos;
        /* WR will need len16 */
        len16 = DIV_ROUND_UP(flits, 2);
        wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
        wr->cookie = 0;
        pos += sizeof(*wr);
        /* ULP_TXPKT */
        ulptx = pos;
        ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
                                ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
                                ULP_TXPKT_FID_V(q->q.cntxt_id) |
                                ULP_TXPKT_RO_F);
        ulptx->len = htonl(len16 - 1);
        /* ULPTX_IDATA sub-command */
        idata = (struct ulptx_idata *)(ulptx + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
        /* idata length will include cpl_tx_sec_pdu + key context size +
         * cpl_tx_data header.
         */
        idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
                           sizeof(*tx_data));
        /* SEC CPL */
        cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
        cpl->op_ivinsrtofst =
                htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                      CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
                      CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
                      CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
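        /* IVINSRTOFST = TLS_HEADER_SIZE + 1 places the IV right after
         * the 5 byte TLS record header; the +1 is, as we read it,
         * because the hardware offset field is 1-based.
         */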
        cpl->pldlen = htonl(skb->data_len);

        /* encryption should start after tls header size + iv size */
        cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;

        cpl->aadstart_cipherstop_hi =
                htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
                      CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
                      CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));

        /* authentication will also start after tls header + iv size */
        cpl->cipherstop_lo_authinsert =
        htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
              CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
              CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
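        /* AUTHSTOP/AUTHINSERT of 16 mean, in our understanding, that
         * authentication stops 16 bytes short of the payload end and the
         * computed GCM tag is inserted into those final 16 bytes.
         */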

        /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
        cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
        cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
        cpl->scmd1 = cpu_to_be64(tx_info->record_no);

        pos = cpl + 1;
        /* check if space left to fill the keys */
        left = (void *)q->q.stat - pos;
        if (!left) {
                left = (void *)end - (void *)q->q.stat;
                pos = q->q.desc;
                end = pos + left;
        }

        pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
                               tx_info->key_ctx_len);
        left = (void *)q->q.stat - pos;

        if (!left) {
                left = (void *)end - (void *)q->q.stat;
                pos = q->q.desc;
                end = pos + left;
        }
        /* CPL_TX_DATA */
        tx_data = (void *)pos;
        OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
        tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));

        tx_data->rsvd = htonl(tcp_seq);

        tx_data->flags = htonl(TX_BYPASS_F);
        if (tcp_push)
                tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);

        /* check left again, it might go beyond queue limit */
        pos = tx_data + 1;
        left = (void *)q->q.stat - pos;

        /* check the position again */
        if (!left) {
                left = (void *)end - (void *)q->q.stat;
                pos = q->q.desc;
                end = pos + left;
        }

        /* send the complete packet except the header */
        cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
                        sgl_sdesc->addr);
        sgl_sdesc->skb = skb;

        chcr_txq_advance(&q->q, ndesc);
        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);

        return 0;
}

/*
 * chcr_ktls_xmit_wr_short: sends out a partial record. If it is the
 * middle part of a record, fetch the prior data to make it 16 byte
 * aligned, and only then send it out.
 *
 * @skb - skb which contains the partial record.
 * @tx_info - driver specific tls info.
 * @q - TX queue.
 * @tcp_seq
 * @tcp_push - tcp push bit.
 * @mss - segment size.
 * @tls_rec_offset - offset from the start of the tls record.
 * @prior_data - data before the current segment, required to make this
 *               record 16 byte aligned.
 * @prior_data_len - prior_data length (less than 16)
 * return: NETDEV_TX_BUSY/NET_TX_OK.
 */
static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
                                   struct chcr_ktls_info *tx_info,
                                   struct sge_eth_txq *q,
                                   u32 tcp_seq, bool tcp_push, u32 mss,
                                   u32 tls_rec_offset, u8 *prior_data,
                                   u32 prior_data_len)
{
        struct adapter *adap = tx_info->adap;
        u32 len16, wr_mid = 0, cipher_start;
        unsigned int flits = 0, ndesc;
        int credits, left, last_desc;
        struct tx_sw_desc *sgl_sdesc;
        struct cpl_tx_data *tx_data;
        struct cpl_tx_sec_pdu *cpl;
        struct ulptx_idata *idata;
        struct ulp_txpkt *ulptx;
        struct fw_ulptx_wr *wr;
        __be64 iv_record;
        void *pos;
        u64 *end;

        /* get the number of flits required; it's a partial record, so 2
         * flits (AES_BLOCK_SIZE) will be added.
         */
        flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
        /* get the correct 8 byte IV of this record */
        iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
        /* If it's a middle record and not 16 byte aligned to run AES CTR,
         * we need to make it 16 byte aligned, so at least 2 extra flits
         * of immediate data will be added.
         */
        if (prior_data_len)
                flits += 2;
        /* number of descriptors */
        ndesc = chcr_flits_to_desc(flits);
        /* check if enough credits available */
        credits = chcr_txq_avail(&q->q) - ndesc;
        if (unlikely(credits < 0)) {
                chcr_eth_txq_stop(q);
                return NETDEV_TX_BUSY;
        }

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                chcr_eth_txq_stop(q);
                wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }

        last_desc = q->q.pidx + ndesc - 1;
        if (last_desc >= q->q.size)
                last_desc -= q->q.size;
        sgl_sdesc = &q->q.sdesc[last_desc];

        if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
                return NETDEV_TX_BUSY;
        }

        pos = &q->q.desc[q->q.pidx];
        end = (u64 *)pos + flits;
        /* FW_ULPTX_WR */
        wr = pos;
        /* WR will need len16 */
        len16 = DIV_ROUND_UP(flits, 2);
        wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
        wr->cookie = 0;
        pos += sizeof(*wr);
        /* ULP_TXPKT */
        ulptx = pos;
        ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
                                ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
                                ULP_TXPKT_FID_V(q->q.cntxt_id) |
                                ULP_TXPKT_RO_F);
        ulptx->len = htonl(len16 - 1);
        /* ULPTX_IDATA sub-command */
        idata = (struct ulptx_idata *)(ulptx + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
        /* idata length will include cpl_tx_sec_pdu + key context size +
         * cpl_tx_data header.
         */
        idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
                           sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
        /* SEC CPL */
        cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
        /* Cipher start will have tls header + iv size extra if it is the
         * header part of a tls record; otherwise only the 16 byte IV is
         * added.
         */
        cipher_start =
                AES_BLOCK_LEN + 1 +
                (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);

        cpl->op_ivinsrtofst =
                htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                      CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
                      CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
        cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
        cpl->aadstart_cipherstop_hi =
                htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
        cpl->cipherstop_lo_authinsert = 0;
        /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
        cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
        cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
        cpl->scmd1 = 0;

        pos = cpl + 1;
        /* check if space left to fill the keys */
        left = (void *)q->q.stat - pos;
        if (!left) {
                left = (void *)end - (void *)q->q.stat;
                pos = q->q.desc;
                end = pos + left;
        }

        pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
                               tx_info->key_ctx_len);
        left = (void *)q->q.stat - pos;

        if (!left) {
                left = (void *)end - (void *)q->q.stat;
                pos = q->q.desc;
                end = pos + left;
        }
        /* CPL_TX_DATA */
        tx_data = (void *)pos;
        OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
        tx_data->len = htonl(TX_DATA_MSS_V(mss) |
                        TX_LENGTH_V(skb->data_len + prior_data_len));
        tx_data->rsvd = htonl(tcp_seq);
        tx_data->flags = htonl(TX_BYPASS_F);
        if (tcp_push)
                tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);

        /* check left again, it might go beyond queue limit */
        pos = tx_data + 1;
        left = (void *)q->q.stat - pos;

        /* check the position again */
        if (!left) {
                left = (void *)end - (void *)q->q.stat;
                pos = q->q.desc;
                end = pos + left;
        }
        /* Copy the 16 byte AES-CTR counter block: 4 bytes of salt, the 8
         * byte per-record IV and a 4 byte big-endian block counter.
         */
        memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
        memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
        *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
                htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
                (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
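        /* Why the counter starts at 2 (our reading, consistent with
         * GCM): counter block 1 is reserved for the authentication tag,
         * so payload block 0 of the record uses counter 2; resuming at
         * tls_rec_offset skips the already-sent payload blocks, i.e.
         * (tls_rec_offset - header - iv) / AES_BLOCK_LEN of them.
         */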

        pos += 16;
        /* prior_data_len will always be less than 16 bytes; fill
         * prior_data after the AES-CTR block and clear the remaining
         * length to 0.
         */
        if (prior_data_len)
                pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
        /* send the complete packet except the header */
        cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
                        sgl_sdesc->addr);
        sgl_sdesc->skb = skb;

        chcr_txq_advance(&q->q, ndesc);
        cxgb4_ring_tx_db(adap, &q->q, ndesc);

        return 0;
}

/*
 * chcr_ktls_tx_plaintxt: handles records which carry only plain text
 * (only the tls header and iv).
 * @tx_info - driver specific tls info.
 * @skb - skb which contains the partial record.
 * @tcp_seq
 * @mss - segment size.
 * @tcp_push - tcp push bit.
 * @q - TX queue.
 * @port_id : port number
 * @prior_data - data before the current segment, required to make this
 *               record 16 byte aligned.
 * @prior_data_len - prior_data length (less than 16)
 * return: NETDEV_TX_BUSY/NET_TX_OK.
 */
static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
                                 struct sk_buff *skb, u32 tcp_seq, u32 mss,
                                 bool tcp_push, struct sge_eth_txq *q,
                                 u32 port_id, u8 *prior_data,
                                 u32 prior_data_len)
{
        int credits, left, len16, last_desc;
        unsigned int flits = 0, ndesc;
        struct tx_sw_desc *sgl_sdesc;
        struct cpl_tx_data *tx_data;
        struct ulptx_idata *idata;
        struct ulp_txpkt *ulptx;
        struct fw_ulptx_wr *wr;
        u32 wr_mid = 0;
        void *pos;
        u64 *end;

        flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
        flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
        if (prior_data_len)
                flits += 2;
        /* WR will need len16 */
        len16 = DIV_ROUND_UP(flits, 2);
        /* check how many descriptors needed */
        ndesc = DIV_ROUND_UP(flits, 8);
1457
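            /* Units (informal): a flit is 8 bytes, len16 counts 16-byte units
             * (flits / 2, rounded up) and one descriptor holds 64 bytes, i.e.
             * 8 flits.  For example, 11 flits need len16 = 6 and ndesc = 2.
             */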
1458        credits = chcr_txq_avail(&q->q) - ndesc;
1459        if (unlikely(credits < 0)) {
1460                chcr_eth_txq_stop(q);
1461                return NETDEV_TX_BUSY;
1462        }
1463
1464        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1465                chcr_eth_txq_stop(q);
1466                wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1467        }
1468
1469        last_desc = q->q.pidx + ndesc - 1;
1470        if (last_desc >= q->q.size)
1471                last_desc -= q->q.size;
1472        sgl_sdesc = &q->q.sdesc[last_desc];
1473
1474        if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
1475                                   sgl_sdesc->addr) < 0)) {
1476                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1477                q->mapping_err++;
1478                return NETDEV_TX_BUSY;
1479        }
1480
1481        pos = &q->q.desc[q->q.pidx];
1482        end = (u64 *)pos + flits;
1483        /* FW_ULPTX_WR */
1484        wr = pos;
1485        wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1486        wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
1487        wr->cookie = 0;
1488        pos += sizeof(*wr);
1489        /* ULP_TXPKT */
1490        ulptx = (struct ulp_txpkt *)(wr + 1);
1491        ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
1492                        ULP_TXPKT_DATAMODIFY_V(0) |
1493                        ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
1494                        ULP_TXPKT_DEST_V(0) |
1495                        ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
1496        ulptx->len = htonl(len16 - 1);
1497        /* ULPTX_IDATA sub-command */
1498        idata = (struct ulptx_idata *)(ulptx + 1);
1499        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
1500        idata->len = htonl(sizeof(*tx_data) + prior_data_len);
1501        /* CPL_TX_DATA */
1502        tx_data = (struct cpl_tx_data *)(idata + 1);
1503        OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
1504        tx_data->len = htonl(TX_DATA_MSS_V(mss) |
1505                        TX_LENGTH_V(skb->data_len + prior_data_len));
1506        /* set tcp seq number */
1507        tx_data->rsvd = htonl(tcp_seq);
1508        tx_data->flags = htonl(TX_BYPASS_F);
1509        if (tcp_push)
1510                tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
1511
1512        pos = tx_data + 1;
1513        /* apart from the prior data, the remaining part of the 16 bytes
1514         * must be zero-filled.
1515         */
1516        if (prior_data_len)
1517                pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
1518
1519        /* check left again, it might go beyond queue limit */
1520        left = (void *)q->q.stat - pos;
1521
1522        /* check the position again */
1523        if (!left) {
1524                left = (void *)end - (void *)q->q.stat;
1525                pos = q->q.desc;
1526                end = pos + left;
1527        }
1528        /* send the complete packet including the header */
1529        cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
1530                        sgl_sdesc->addr);
1531        sgl_sdesc->skb = skb;
1532
1533        chcr_txq_advance(&q->q, ndesc);
1534        cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1535        return 0;
1536}
1537
1538/*
1539 * chcr_ktls_copy_record_in_skb
1540 * @nskb - new skb to which the record frags are added.
1541 * @record - record whose complete (up to 16K) payload is held in frags.
1542 */
1543static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
1544                                         struct tls_record_info *record)
1545{
1546        int i = 0;
1547
1548        for (i = 0; i < record->num_frags; i++) {
1549                skb_shinfo(nskb)->frags[i] = record->frags[i];
1550                /* increase the frag ref count */
1551                __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
1552        }
1553
1554        skb_shinfo(nskb)->nr_frags = record->num_frags;
1555        nskb->data_len = record->len;
1556        nskb->len += record->len;
1557        nskb->truesize += record->len;
1558}
1559
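    /* Note (informal): the record payload is shared rather than copied; the
     * extra frag references taken above keep the pages alive if the stack
     * ACKs and tears down the record while the new skb is still in flight.
     */
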
1560/*
1561 * chcr_ktls_update_snd_una: reset SND_UNA to avoid sending the same
1562 * segment again; hardware will discard any segment that lies before the
1563 * current tx max.
1564 * @tx_info - driver specific tls info.
1565 * @q - TX queue.
1566 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1567 */
1568static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
1569                                    struct sge_eth_txq *q)
1570{
1571        struct fw_ulptx_wr *wr;
1572        unsigned int ndesc;
1573        int credits;
1574        void *pos;
1575        u32 len;
1576
1577        len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
1578        ndesc = DIV_ROUND_UP(len, 64);
1579
1580        credits = chcr_txq_avail(&q->q) - ndesc;
1581        if (unlikely(credits < 0)) {
1582                chcr_eth_txq_stop(q);
1583                return NETDEV_TX_BUSY;
1584        }
1585
1586        pos = &q->q.desc[q->q.pidx];
1587
1588        wr = pos;
1589        /* ULPTX wr */
1590        wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
1591        wr->cookie = 0;
1592        /* fill len in wr field */
1593        wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
1594
1595        pos += sizeof(*wr);
1596
1597        pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
1598                                         TCB_SND_UNA_RAW_W,
1599                                         TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
1600                                         TCB_SND_UNA_RAW_V(0), 0);
1601
1602        chcr_txq_advance(&q->q, ndesc);
1603        cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
1604
1605        return 0;
1606}
1607
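    /* Informal reading of the TCB update above: the mask
     * TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M) selects the whole SND_UNA_RAW
     * field and the value TCB_SND_UNA_RAW_V(0) clears it, so hardware
     * restarts the raw send-unacked offset at zero for this tid.
     */
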
1608/*
1609 * chcr_end_part_handler: handles a record that is complete, or whose end
1610 * part has been received. The T6 adapter has an issue in that it cannot
1611 * send out a TAG with a partial record, so for an end part we must send
1612 * the TAG as well, which requires fetching the complete record and
1613 * sending it to the crypto module.
1614 * @tx_info - driver specific tls info.
1615 * @skb - skb containing the partial record.
1616 * @record - complete record of 16K size.
1617 * @tcp_seq - TCP sequence number.
1618 * @mss - segment size in which TP needs to chop a packet.
1619 * @tcp_push_no_fin - tcp push if fin is not set.
1620 * @q - TX queue.
1621 * @tls_end_offset - offset from the end of the record.
1622 * @last_wr - true if this is the last part of the skb going out.
1623 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1624 */
1625static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
1626                                 struct sk_buff *skb,
1627                                 struct tls_record_info *record,
1628                                 u32 tcp_seq, int mss, bool tcp_push_no_fin,
1629                                 struct sge_eth_txq *q,
1630                                 u32 tls_end_offset, bool last_wr)
1631{
1632        struct sk_buff *nskb = NULL;
1633        /* check if it is a complete record */
1634        if (tls_end_offset == record->len) {
1635                nskb = skb;
1636                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
1637        } else {
1638                dev_kfree_skb_any(skb);
1639
1640                nskb = alloc_skb(0, GFP_ATOMIC);
1641                if (!nskb)
1642                        return NETDEV_TX_BUSY;
1643                /* copy complete record in skb */
1644                chcr_ktls_copy_record_in_skb(nskb, record);
1645                /* packet is being sent from the beginning, update the tcp_seq
1646                 * accordingly.
1647                 */
1648                tcp_seq = tls_record_start_seq(record);
1649                /* reset snd una, so the middle record won't send the already
1650                 * sent part.
1651                 */
1652                if (chcr_ktls_update_snd_una(tx_info, q))
1653                        goto out;
1654                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
1655        }
1656
1657        if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
1658                                       (last_wr && tcp_push_no_fin),
1659                                       mss)) {
1660                goto out;
1661        }
1662        return 0;
1663out:
1664        dev_kfree_skb_any(nskb);
1665        return NETDEV_TX_BUSY;
1666}
1667
1668/*
1669 * chcr_short_record_handler: takes care of records that don't include the
1670 * end part (the first part or a middle part of a record). In such cases,
1671 * AES-CTR is used in place of AES-GCM to send out the partial packet.
1672 * The partial record may be the first part of the record or a middle
1673 * part. For a middle part we must fetch prior data to make the start 16
1674 * byte aligned. If the segment begins inside the tls header or iv, back
1675 * up to the start of the tls header; if it ends in a partial TAG, remove
1676 * the complete TAG and send only the payload.
1677 * One more possibility is that only the (partial) header is present; that
1678 * portion is sent as plaintext.
1679 * @tx_info - driver specific tls info.
1680 * @skb - skb containing the partial record.
1681 * @record - complete record of 16K size.
1682 * @tcp_seq - TCP sequence number.
1683 * @mss - segment size in which TP needs to chop a packet.
1684 * @tcp_push_no_fin - tcp push if fin is not set.
1685 * @q - TX queue.
1686 * @tls_end_offset - offset from the end of the record.
1687 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
1688 */
1689static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
1690                                     struct sk_buff *skb,
1691                                     struct tls_record_info *record,
1692                                     u32 tcp_seq, int mss, bool tcp_push_no_fin,
1693                                     struct sge_eth_txq *q, u32 tls_end_offset)
1694{
1695        u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
1696        u8 prior_data[16] = {0};
1697        u32 prior_data_len = 0;
1698        u32 data_len;
1699
1700        /* If the skb ends in the middle of the tag/HASH, we cannot send it
1701         * as-is; trim it so that only the payload before the HASH goes out.
1702         */
1703        int remaining_record = tls_end_offset - skb->data_len;
1704
1705        if (remaining_record > 0 &&
1706            remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
1707                int trimmed_len = skb->data_len -
1708                        (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
1709                struct sk_buff *tmp_skb = NULL;
1710                /* don't process the pkt if it is only a partial tag */
1711                if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
1712                        goto out;
1713
1714                WARN_ON(trimmed_len > skb->data_len);
1715
1716                /* shift the trimmed payload into a new skb */
1717                tmp_skb = alloc_skb(0, GFP_ATOMIC);
1718                if (unlikely(!tmp_skb))
1719                        goto out;
1720
1721                chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
1722                /* free the last trimmed portion */
1723                dev_kfree_skb_any(skb);
1724                skb = tmp_skb;
1725                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
1726        }
1727        data_len = skb->data_len;
1728        /* check if the middle record's start point is 16 byte aligned; CTR
1729         * needs a 16 byte aligned start point to begin encryption.
1730         */
1731        if (tls_rec_offset) {
1732                /* there is an offset from start, means its a middle record */
1733                int remaining = 0;
1734
1735                if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
1736                        prior_data_len = tls_rec_offset;
1737                        tls_rec_offset = 0;
1738                        remaining = 0;
1739                } else {
1740                        prior_data_len =
1741                                (tls_rec_offset -
1742                                (TLS_HEADER_SIZE + tx_info->iv_size))
1743                                % AES_BLOCK_LEN;
1744                        remaining = tls_rec_offset - prior_data_len;
1745                }
1746
1747                /* A non-zero prior_data_len means we must fetch prior data,
1748                 * either to make this record's start 16 byte aligned or to
1749                 * reach back to the start of the tls header.
1750                 */
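                    /* Worked example (illustrative): with the 5-byte header and
                     * an 8-byte iv, tls_rec_offset = 70 gives prior_data_len =
                     * (70 - 13) % 16 = 9 and remaining = 70 - 9 = 61, so the
                     * 9 bytes at record offsets 61-69 are fetched as prior
                     * data.
                     */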
1751                if (prior_data_len) {
1752                        int i = 0;
1753                        u8 *data = NULL;
1754                        skb_frag_t *f;
1755                        u8 *vaddr;
1756                        int frag_size = 0, frag_delta = 0;
1757
1758                        while (remaining > 0) {
1759                                frag_size = skb_frag_size(&record->frags[i]);
1760                                if (remaining < frag_size)
1761                                        break;
1762
1763                                remaining -= frag_size;
1764                                i++;
1765                        }
1766                        f = &record->frags[i];
1767                        vaddr = kmap_atomic(skb_frag_page(f));
1768
1769                        data = vaddr + skb_frag_off(f)  + remaining;
1770                        frag_delta = skb_frag_size(f) - remaining;
1771
1772                        if (frag_delta >= prior_data_len) {
1773                                memcpy(prior_data, data, prior_data_len);
1774                                kunmap_atomic(vaddr);
1775                        } else {
1776                                memcpy(prior_data, data, frag_delta);
1777                                kunmap_atomic(vaddr);
1778                                /* get the next page */
1779                                f = &record->frags[i + 1];
1780                                vaddr = kmap_atomic(skb_frag_page(f));
1781                                data = vaddr + skb_frag_off(f);
1782                                memcpy(prior_data + frag_delta,
1783                                       data, (prior_data_len - frag_delta));
1784                                kunmap_atomic(vaddr);
1785                        }
1786                        /* rewind tcp_seq to cover the prepended prior data */
1787                        tcp_seq -= prior_data_len;
1788                        /* include prior_data_len in further length
1789                         * calculations */
1790                        data_len += prior_data_len;
1791                }
1792                /* reset snd una, so the middle record won't send the already
1793                 * sent part.
1794                 */
1795                if (chcr_ktls_update_snd_una(tx_info, q))
1796                        goto out;
1797                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
1798        } else {
1799                /* Otherwise this is a partial first part of the record; if it
1800                 * is only the header there is nothing to encrypt.
1801                 */
1802                if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
1803                        if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
1804                                                  tcp_push_no_fin, q,
1805                                                  tx_info->port_id,
1806                                                  prior_data,
1807                                                  prior_data_len)) {
1808                                goto out;
1809                        }
1810                        return 0;
1811                }
1812                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
1813        }
1814
1815        if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
1816                                    mss, tls_rec_offset, prior_data,
1817                                    prior_data_len)) {
1818                goto out;
1819        }
1820
1821        return 0;
1822out:
1823        dev_kfree_skb_any(skb);
1824        return NETDEV_TX_BUSY;
1825}
1826
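    /* Dispatch overview (informal summary of the handlers above): a segment
     * that covers the end of a record goes through chcr_end_part_handler
     * (full AES-GCM, resending the whole record when only its tail is in the
     * skb); a segment with no end part goes through chcr_short_record_handler
     * (AES-CTR); and a segment carrying only the TLS header and IV is sent
     * out in the clear via chcr_ktls_tx_plaintxt.
     */
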
1827/* nic tls TX handler */
1828int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
1829{
1830        struct chcr_ktls_ofld_ctx_tx *tx_ctx;
1831        struct tcphdr *th = tcp_hdr(skb);
1832        int data_len, qidx, ret = 0, mss;
1833        struct tls_record_info *record;
1834        struct chcr_stats_debug *stats;
1835        struct chcr_ktls_info *tx_info;
1836        u32 tls_end_offset, tcp_seq;
1837        struct tls_context *tls_ctx;
1838        struct sk_buff *local_skb;
1839        int new_connection_state;
1840        struct sge_eth_txq *q;
1841        struct adapter *adap;
1842        unsigned long flags;
1843
1844        tcp_seq = ntohl(th->seq);
1845
1846        mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
1847
1848        /* bail out if this socket is not set up for ktls offload */
1849        if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
1850                goto out;
1851
1852        tls_ctx = tls_get_ctx(skb->sk);
1853        if (unlikely(tls_ctx->netdev != dev))
1854                goto out;
1855
1856        tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
1857        tx_info = tx_ctx->chcr_info;
1858
1859        if (unlikely(!tx_info))
1860                goto out;
1861
1862        /* check the connection state, we don't need to pass new connection
1863         * state, state machine will check and update the new state if it is
1864         * stuck due to responses not received from HW.
1865         * Start the tx handling only if state is KTLS_CONN_TX_READY.
1866         */
1867        new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
1868        if (new_connection_state != KTLS_CONN_TX_READY)
1869                goto out;
1870
1871        /* don't touch the original skb; make a new skb from which each
1872         * record can be extracted and sent separately.
1873         */
1874        local_skb = alloc_skb(0, GFP_ATOMIC);
1875
1876        if (unlikely(!local_skb))
1877                return NETDEV_TX_BUSY;
1878
1879        adap = tx_info->adap;
1880        stats = &adap->chcr_stats;
1881
1882        qidx = skb->queue_mapping;
1883        q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
1884        cxgb4_reclaim_completed_tx(adap, &q->q, true);
1885        /* if TCP options are set but FIN is not, send the options first */
1886        if (!th->fin && chcr_ktls_check_tcp_options(th)) {
1887                ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
1888                                                  tx_info->tx_chan);
1889                if (ret)
1890                        return NETDEV_TX_BUSY;
1891        }
1892        /* update tcb */
1893        ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
1894                                      ntohl(th->ack_seq),
1895                                      ntohs(th->window));
1896        if (ret) {
1897                dev_kfree_skb_any(local_skb);
1898                return NETDEV_TX_BUSY;
1899        }
1900
1901        /* copy skb contents into local skb */
1902        chcr_ktls_skb_copy(skb, local_skb);
1903
1904        /* go through the skb and send only one record at a time. */
1905        data_len = skb->data_len;
1906        /* A TCP segment may carry a complete record or only part of one.
1907         * chcr_end_part_handler handles the cases where a complete record
1908         * or the end part of a record is received. If the end part itself
1909         * is partial, the complete record is sent again.
1910         */
1911
1912        do {
1913                int i;
1914
1915                cxgb4_reclaim_completed_tx(adap, &q->q, true);
1916                /* take the record list lock */
1917                spin_lock_irqsave(&tx_ctx->base.lock, flags);
1918                /* fetch the tls record */
1919                record = tls_get_record(&tx_ctx->base, tcp_seq,
1920                                        &tx_info->record_no);
1921                /* By the time the packet reached us an ACK may have been
1922                 * received and the record deleted; handle that gracefully.
1923                 */
1924                if (unlikely(!record)) {
1925                        spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1926                        atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
1927                        goto out;
1928                }
1929
1930                if (unlikely(tls_record_is_start_marker(record))) {
1931                        spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1932                        atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
1933                        goto out;
1934                }
1935
1936                /* Take a reference on each frag page of the record so the
1937                 * pages cannot be freed mid-way if the stack receives an ACK
1938                 * and tries to delete the record.
1939                 */
1940                for (i = 0; i < record->num_frags; i++)
1941                        __skb_frag_ref(&record->frags[i]);
1942                /* release the record list lock */
1943                spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
1944
1945                tls_end_offset = record->end_seq - tcp_seq;
1946
1947                pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
1948                         tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
1949                /* if a tls record is finishing in this SKB */
1950                if (tls_end_offset <= data_len) {
1951                        struct sk_buff *nskb = NULL;
1952
1953                        if (tls_end_offset < data_len) {
1954                                nskb = alloc_skb(0, GFP_ATOMIC);
1955                                if (unlikely(!nskb)) {
1956                                        ret = -ENOMEM;
1957                                        goto clear_ref;
1958                                }
1959
1960                                chcr_ktls_skb_shift(nskb, local_skb,
1961                                                    tls_end_offset);
1962                        } else {
1963                                /* it is the only record in this skb; use it
1964                                 * directly.
1965                                 */
1966                                nskb = local_skb;
1967                        }
1968                        ret = chcr_end_part_handler(tx_info, nskb, record,
1969                                                    tcp_seq, mss,
1970                                                    (!th->fin && th->psh), q,
1971                                                    tls_end_offset,
1972                                                    (nskb == local_skb));
1973
1974                        if (ret && nskb != local_skb)
1975                                dev_kfree_skb_any(local_skb);
1976
1977                        data_len -= tls_end_offset;
1978                        /* advance tcp_seq to handle the next record */
1980                        tcp_seq += tls_end_offset;
1981                } else {
1982                        ret = chcr_short_record_handler(tx_info, local_skb,
1983                                                        record, tcp_seq, mss,
1984                                                        (!th->fin && th->psh),
1985                                                        q, tls_end_offset);
1986                        data_len = 0;
1987                }
1988clear_ref:
1989        /* drop the frag references taken locally above */
1990        for (i = 0; i < record->num_frags; i++)
1991                __skb_frag_unref(&record->frags[i]);
1994        /* on any failure, break out of the loop */
1995                if (ret)
1996                        goto out;
1997        /* data_len should never go negative */
1998                WARN_ON(data_len < 0);
1999
2000        } while (data_len > 0);
2001
2002        tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
2003
2004        atomic64_inc(&stats->ktls_tx_encrypted_packets);
2005        atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
2006
2007        /* if TCP FIN is set, send a separate packet carrying all the tcp
2008         * options as well.
2009         */
2010        if (th->fin)
2011                chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
2012
2013out:
2014        dev_kfree_skb_any(skb);
2015        return NETDEV_TX_OK;
2016}
2017#endif /* CONFIG_CHELSIO_TLS_DEVICE */
2018