linux/drivers/crypto/chelsio/chtls/chtls_hw.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/tls.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

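/*
 * Build a CPL_SET_TCB_FIELD work request in place: update TCB word
 * @word of the connection identified by csk->tid, applying @val under
 * @mask, and pad the request to a 16-byte multiple with a
 * ULP_TX_SC_NOOP sub-command.
 */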
static void __set_tcb_field_direct(struct chtls_sock *csk,
                                   struct cpl_set_tcb_field *req, u16 word,
                                   u64 mask, u64 val, u8 cookie, int no_reply)
{
        struct ulptx_idata *sc;

        INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
        req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
                                QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
        sc->len = htonl(0);
}

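/*
 * Fill @skb with a TCB field update for @sk's connection and steer it
 * to the control queue of the port the connection egresses on.
 */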
static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
                            u64 mask, u64 val, u8 cookie, int no_reply)
{
        struct cpl_set_tcb_field *req;
        struct chtls_sock *csk;
        struct ulptx_idata *sc;
        unsigned int wrlen;

        wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
        csk = rcu_dereference_sk_user_data(sk);

        req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
        __set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
}

/*
 * Send a control message to the HW; the message goes as immediate data
 * and the packet is freed immediately.
 */
static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
{
        struct cpl_set_tcb_field *req;
        unsigned int credits_needed;
        struct chtls_sock *csk;
        struct ulptx_idata *sc;
        struct sk_buff *skb;
        unsigned int wrlen;
        int ret;

        wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);

        skb = alloc_skb(wrlen, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        credits_needed = DIV_ROUND_UP(wrlen, 16);
        csk = rcu_dereference_sk_user_data(sk);

        __set_tcb_field(sk, skb, word, mask, val, 0, 1);
        skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
        csk->wr_credits -= credits_needed;
        csk->wr_unacked += credits_needed;
        enqueue_wr(csk, skb);
        ret = cxgb4_ofld_send(csk->egress_dev, skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

/*
 * Set one of the t_flags bits in the TCB.
 */
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
        return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
                                   (u64)val << bit_pos);
}

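/*
 * Point the connection's TCB at a key-context id (TCB word 31 appears
 * to hold the key tag); in this driver it is only used for the
 * receive key.
 */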
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
        return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
}

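/*
 * Clear TCB word 28; this resets the TLS sequence number tracked by
 * the hardware so processing with the new key starts at record 0.
 */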
static int chtls_set_tcb_seqno(struct sock *sk)
{
        return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
}

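/*
 * Set or clear the RX_QUIESCE flag in t_flags; RX processing is held
 * off while quiesced, e.g. while new key material is being installed.
 */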
static int chtls_set_tcb_quiesce(struct sock *sk, int val)
{
        return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
                                   TF_RX_QUIESCE_V(val));
}

/* TLS Key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
        unsigned int num_key_ctx, bsize;
        int ksize;

        num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
        bsize = BITS_TO_LONGS(num_key_ctx);

        cdev->kmap.size = num_key_ctx;
        cdev->kmap.available = bsize;
        ksize = sizeof(*cdev->kmap.addr) * bsize;
        cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL);
        if (!cdev->kmap.addr)
                return -ENOMEM;

        cdev->kmap.start = lldi->vr->key.start;
        spin_lock_init(&cdev->kmap.lock);
        return 0;
}

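/*
 * Allocate a free index from the device's key bitmap under kmap.lock
 * and record it as the connection's RX or TX key id, depending on
 * @optname.  Returns -1 if on-chip key memory is exhausted.
 */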
static int get_new_keyid(struct chtls_sock *csk, u32 optname)
{
        struct net_device *dev = csk->egress_dev;
        struct chtls_dev *cdev = csk->cdev;
        struct chtls_hws *hws;
        struct adapter *adap;
        int keyid;

        adap = netdev2adap(dev);
        hws = &csk->tlshws;

        spin_lock_bh(&cdev->kmap.lock);
        keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
        if (keyid < cdev->kmap.size) {
                __set_bit(keyid, cdev->kmap.addr);
                if (optname == TLS_RX)
                        hws->rxkey = keyid;
                else
                        hws->txkey = keyid;
                atomic_inc(&adap->chcr_stats.tls_key);
        } else {
                keyid = -1;
        }
        spin_unlock_bh(&cdev->kmap.lock);
        return keyid;
}

void free_tls_keyid(struct sock *sk)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct net_device *dev = csk->egress_dev;
        struct chtls_dev *cdev = csk->cdev;
        struct chtls_hws *hws;
        struct adapter *adap;

        if (!cdev->kmap.addr)
                return;

        adap = netdev2adap(dev);
        hws = &csk->tlshws;

        spin_lock_bh(&cdev->kmap.lock);
        if (hws->rxkey >= 0) {
                __clear_bit(hws->rxkey, cdev->kmap.addr);
                atomic_dec(&adap->chcr_stats.tls_key);
                hws->rxkey = -1;
        }
        if (hws->txkey >= 0) {
                __clear_bit(hws->txkey, cdev->kmap.addr);
                atomic_dec(&adap->chcr_stats.tls_key);
                hws->txkey = -1;
        }
        spin_unlock_bh(&cdev->kmap.lock);
}

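/*
 * Convert a key index to its location in on-chip key memory: contexts
 * are TLS_KEY_CONTEXT_SZ bytes apart and the resulting address is
 * expressed in 32-byte units (hence the shift by 5).
 */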
unsigned int keyid_to_addr(int start_addr, int keyid)
{
        return (start_addr + (keyid * TLS_KEY_CONTEXT_SZ)) >> 5;
}

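/*
 * Fill in the IV/AAD/cipher/auth offset fields of a receive key
 * context; the constants encode the record layout the hardware
 * expects for AES-GCM.
 */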
static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
{
        kctx->iv_to_auth = cpu_to_be64(KEYCTX_TX_WR_IV_V(6ULL) |
                                  KEYCTX_TX_WR_AAD_V(1ULL) |
                                  KEYCTX_TX_WR_AADST_V(5ULL) |
                                  KEYCTX_TX_WR_CIPHER_V(14ULL) |
                                  KEYCTX_TX_WR_CIPHERST_V(0ULL) |
                                  KEYCTX_TX_WR_AUTH_V(14ULL) |
                                  KEYCTX_TX_WR_AUTHST_V(16ULL) |
                                  KEYCTX_TX_WR_AUTHIN_V(16ULL));
}

static int chtls_key_info(struct chtls_sock *csk,
                          struct _key_ctx *kctx,
                          u32 keylen, u32 optname,
                          int cipher_type)
{
        unsigned char key[AES_MAX_KEY_SIZE];
        unsigned char *key_p, *salt;
        unsigned char ghash_h[AEAD_H_SIZE];
        int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
        struct crypto_aes_ctx aes;
        int ret;

        key_ctx_size = sizeof(struct _key_ctx) +
                       roundup(keylen, 16) + AEAD_H_SIZE;

        /* GCM mode of AES supports 128- and 256-bit encryption, so
         * prepare the key context based on the GCM cipher type
         */
        switch (cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
                        (struct tls12_crypto_info_aes_gcm_128 *)
                                        &csk->tlshws.crypto_info;
                memcpy(key, gcm_ctx_128->key, keylen);

                key_p            = gcm_ctx_128->key;
                salt             = gcm_ctx_128->salt;
                ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
                salt_size        = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
                kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
                break;
        }
        case TLS_CIPHER_AES_GCM_256: {
                struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
                        (struct tls12_crypto_info_aes_gcm_256 *)
                                        &csk->tlshws.crypto_info;
                memcpy(key, gcm_ctx_256->key, keylen);

                key_p            = gcm_ctx_256->key;
                salt             = gcm_ctx_256->salt;
                ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
                salt_size        = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
                kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                break;
        }
        default:
                pr_err("GCM: Invalid cipher type %d\n", cipher_type);
                return -EINVAL;
        }

        /* Calculate H = CIPH(K, 0 repeated 16 times).
         * It will go into the key context.
         */
        ret = aes_expandkey(&aes, key, keylen);
        if (ret)
                return ret;

        memset(ghash_h, 0, AEAD_H_SIZE);
        aes_encrypt(&aes, ghash_h, ghash_h);
        memzero_explicit(&aes, sizeof(aes));
        csk->tlshws.keylen = key_ctx_size;

        /* Copy the Key context */
        if (optname == TLS_RX) {
                int key_ctx;

                key_ctx = ((key_ctx_size >> 4) << 3);
                kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
                                                 kctx_mackey_size,
                                                 0, 0, key_ctx);
                chtls_rxkey_ivauth(kctx);
        } else {
                kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 kctx_mackey_size,
                                                 0, 0, key_ctx_size >> 4);
        }

        memcpy(kctx->salt, salt, salt_size);
        memcpy(kctx->key, key_p, keylen);
        memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
        /* erase key info from driver */
        memset(key_p, 0, keylen);

        return 0;
}

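/*
 * Cache the security command descriptor for the connection.  The
 * numeric fields select, among other things, the cipher and auth
 * modes; the values used here correspond to AES-GCM with GHASH
 * authentication.
 */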
static void chtls_set_scmd(struct chtls_sock *csk)
{
        struct chtls_hws *hws = &csk->tlshws;

        hws->scmd.seqno_numivs =
                SCMD_SEQ_NO_CTRL_V(3) |
                SCMD_PROTO_VERSION_V(0) |
                SCMD_ENC_DEC_CTRL_V(0) |
                SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
                SCMD_CIPH_MODE_V(2) |
                SCMD_AUTH_MODE_V(4) |
                SCMD_HMAC_CTRL_V(0) |
                SCMD_IV_SIZE_V(4) |
                SCMD_NUM_IVS_V(1);

        hws->scmd.ivgen_hdrlen =
                SCMD_IV_GEN_CTRL_V(1) |
                SCMD_KEY_CTX_INLINE_V(0) |
                SCMD_TLS_FRAG_ENABLE_V(1);
}

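/*
 * Program a TLS key into on-chip key memory using an immediate
 * ULP_TX_MEM_WRITE work request.  For the receive side, also point
 * the TCB at the new key, reset the TLS sequence number and lift RX
 * quiesce once the key is in place.
 */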
int chtls_setkey(struct chtls_sock *csk, u32 keylen,
                 u32 optname, int cipher_type)
{
        struct tls_key_req *kwr;
        struct chtls_dev *cdev;
        struct _key_ctx *kctx;
        int wrlen, klen, len;
        struct sk_buff *skb;
        struct sock *sk;
        int keyid;
        int kaddr;
        int ret;

        cdev = csk->cdev;
        sk = csk->sk;

        klen = roundup((keylen + AEAD_H_SIZE) + sizeof(*kctx), 32);
        wrlen = roundup(sizeof(*kwr), 16);
        len = klen + wrlen;

        /* Flush outstanding data before the new key takes effect */
        if (optname == TLS_TX) {
                lock_sock(sk);
                if (skb_queue_len(&csk->txq))
                        chtls_push_frames(csk, 0);
                release_sock(sk);
        }

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        keyid = get_new_keyid(csk, optname);
        if (keyid < 0) {
                ret = -ENOSPC;
                goto out_nokey;
        }

        kaddr = keyid_to_addr(cdev->kmap.start, keyid);
        kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
        kwr->wr.op_to_compl =
                cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
                      FW_WR_ATOMIC_V(1U));
        kwr->wr.flowid_len16 =
                cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)) |
                            FW_WR_FLOWID_V(csk->tid));
        kwr->wr.protocol = 0;
        kwr->wr.mfs = htons(TLS_MFS);
        kwr->wr.reneg_to_write_rx = optname;

        /* ulptx command */
        kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
                            T5_ULP_MEMIO_ORDER_V(1) |
                            T5_ULP_MEMIO_IMM_V(1));
        kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
                              DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
        kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
        kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));

        /* sub command */
        kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
        kwr->sc_imm.len = cpu_to_be32(klen);

        lock_sock(sk);
        /* key info */
        kctx = (struct _key_ctx *)(kwr + 1);
        ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
        if (ret)
                goto out_notcb;

        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
        csk->wr_credits -= DIV_ROUND_UP(len, 16);
        csk->wr_unacked += DIV_ROUND_UP(len, 16);
        enqueue_wr(csk, skb);
        cxgb4_ofld_send(csk->egress_dev, skb);
        /* The work request now owns the skb; clear the local pointer so
         * the error paths below do not free it again (kfree_skb(NULL)
         * is a no-op).
         */
        skb = NULL;

        chtls_set_scmd(csk);
        /* Clear quiesce for Rx key */
        if (optname == TLS_RX) {
                ret = chtls_set_tcb_keyid(sk, keyid);
                if (ret)
                        goto out_notcb;
                ret = chtls_set_tcb_field(sk, 0,
                                          TCB_ULP_RAW_V(TCB_ULP_RAW_M),
                                          TCB_ULP_RAW_V((TF_TLS_KEY_SIZE_V(1) |
                                          TF_TLS_CONTROL_V(1) |
                                          TF_TLS_ACTIVE_V(1) |
                                          TF_TLS_ENABLE_V(1))));
                if (ret)
                        goto out_notcb;
                ret = chtls_set_tcb_seqno(sk);
                if (ret)
                        goto out_notcb;
                ret = chtls_set_tcb_quiesce(sk, 0);
                if (ret)
                        goto out_notcb;
                csk->tlshws.rxkey = keyid;
        } else {
                csk->tlshws.tx_seq_no = 0;
                csk->tlshws.txkey = keyid;
        }

        release_sock(sk);
        return ret;
out_notcb:
        release_sock(sk);
        free_tls_keyid(sk);
out_nokey:
        kfree_skb(skb);
        return ret;
}