linux/drivers/crypto/qat/qat_common/qat_algs.c
   1// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
   2/* Copyright(c) 2014 - 2020 Intel Corporation */
   3#include <linux/module.h>
   4#include <linux/slab.h>
   5#include <linux/crypto.h>
   6#include <crypto/internal/aead.h>
   7#include <crypto/internal/skcipher.h>
   8#include <crypto/aes.h>
   9#include <crypto/sha.h>
  10#include <crypto/hash.h>
  11#include <crypto/hmac.h>
  12#include <crypto/algapi.h>
  13#include <crypto/authenc.h>
  14#include <crypto/xts.h>
  15#include <linux/dma-mapping.h>
  16#include "adf_accel_devices.h"
  17#include "adf_transport.h"
  18#include "adf_common_drv.h"
  19#include "qat_crypto.h"
  20#include "icp_qat_hw.h"
  21#include "icp_qat_fw.h"
  22#include "icp_qat_fw_la.h"
  23
  24#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
  25        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
  26                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
  27                                       ICP_QAT_HW_CIPHER_ENCRYPT)
  28
  29#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
  30        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
  31                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
  32                                       ICP_QAT_HW_CIPHER_DECRYPT)
  33
  34static DEFINE_MUTEX(algs_lock);
  35static unsigned int active_devs;
  36
  37struct qat_alg_buf {
  38        u32 len;
  39        u32 resrvd;
  40        u64 addr;
  41} __packed;
  42
  43struct qat_alg_buf_list {
  44        u64 resrvd;
  45        u32 num_bufs;
  46        u32 num_mapped_bufs;
  47        struct qat_alg_buf bufers[];
  48} __packed __aligned(64);
  49
  50/* Common content descriptor */
  51struct qat_alg_cd {
  52        union {
  53                struct qat_enc { /* Encrypt content desc */
  54                        struct icp_qat_hw_cipher_algo_blk cipher;
  55                        struct icp_qat_hw_auth_algo_blk hash;
  56                } qat_enc_cd;
  57                struct qat_dec { /* Decrypt content desc */
  58                        struct icp_qat_hw_auth_algo_blk hash;
  59                        struct icp_qat_hw_cipher_algo_blk cipher;
  60                } qat_dec_cd;
  61        };
  62} __aligned(64);
  63
  64struct qat_alg_aead_ctx {
  65        struct qat_alg_cd *enc_cd;
  66        struct qat_alg_cd *dec_cd;
  67        dma_addr_t enc_cd_paddr;
  68        dma_addr_t dec_cd_paddr;
  69        struct icp_qat_fw_la_bulk_req enc_fw_req;
  70        struct icp_qat_fw_la_bulk_req dec_fw_req;
  71        struct crypto_shash *hash_tfm;
  72        enum icp_qat_hw_auth_algo qat_hash_alg;
  73        struct qat_crypto_instance *inst;
  74        union {
  75                struct sha1_state sha1;
  76                struct sha256_state sha256;
  77                struct sha512_state sha512;
  78        };
  79        char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
  80        char opad[SHA512_BLOCK_SIZE];
  81};
  82
  83struct qat_alg_skcipher_ctx {
  84        struct icp_qat_hw_cipher_algo_blk *enc_cd;
  85        struct icp_qat_hw_cipher_algo_blk *dec_cd;
  86        dma_addr_t enc_cd_paddr;
  87        dma_addr_t dec_cd_paddr;
  88        struct icp_qat_fw_la_bulk_req enc_fw_req;
  89        struct icp_qat_fw_la_bulk_req dec_fw_req;
  90        struct qat_crypto_instance *inst;
  91        struct crypto_skcipher *ftfm;
  92        bool fallback;
  93};
  94
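/*
 * Return the hardware inner hash state (state1) size, in bytes, for the
 * given auth algorithm, or -EFAULT if the algorithm is not supported.
 */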
  95static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
  96{
  97        switch (qat_hash_alg) {
  98        case ICP_QAT_HW_AUTH_ALGO_SHA1:
  99                return ICP_QAT_HW_SHA1_STATE1_SZ;
 100        case ICP_QAT_HW_AUTH_ALGO_SHA256:
 101                return ICP_QAT_HW_SHA256_STATE1_SZ;
 102        case ICP_QAT_HW_AUTH_ALGO_SHA512:
 103                return ICP_QAT_HW_SHA512_STATE1_SZ;
 104        default:
 105                return -EFAULT;
  106        }
 107        return -EFAULT;
 108}
 109
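/*
 * Precompute the HMAC inner and outer partial digests for the session.
 * The auth key is hashed down if it exceeds the block size, XORed with the
 * HMAC ipad/opad constants, and the resulting partial hash states are
 * exported in big-endian form into the hardware auth block (state1).
 */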
 110static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 111                                  struct qat_alg_aead_ctx *ctx,
 112                                  const u8 *auth_key,
 113                                  unsigned int auth_keylen)
 114{
 115        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
 116        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
 117        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
 118        __be32 *hash_state_out;
 119        __be64 *hash512_state_out;
 120        int i, offset;
 121
 122        memset(ctx->ipad, 0, block_size);
 123        memset(ctx->opad, 0, block_size);
 124        shash->tfm = ctx->hash_tfm;
 125
 126        if (auth_keylen > block_size) {
 127                int ret = crypto_shash_digest(shash, auth_key,
 128                                              auth_keylen, ctx->ipad);
 129                if (ret)
 130                        return ret;
 131
 132                memcpy(ctx->opad, ctx->ipad, digest_size);
 133        } else {
 134                memcpy(ctx->ipad, auth_key, auth_keylen);
 135                memcpy(ctx->opad, auth_key, auth_keylen);
 136        }
 137
 138        for (i = 0; i < block_size; i++) {
 139                char *ipad_ptr = ctx->ipad + i;
 140                char *opad_ptr = ctx->opad + i;
 141                *ipad_ptr ^= HMAC_IPAD_VALUE;
 142                *opad_ptr ^= HMAC_OPAD_VALUE;
 143        }
 144
 145        if (crypto_shash_init(shash))
 146                return -EFAULT;
 147
 148        if (crypto_shash_update(shash, ctx->ipad, block_size))
 149                return -EFAULT;
 150
 151        hash_state_out = (__be32 *)hash->sha.state1;
 152        hash512_state_out = (__be64 *)hash_state_out;
 153
 154        switch (ctx->qat_hash_alg) {
 155        case ICP_QAT_HW_AUTH_ALGO_SHA1:
 156                if (crypto_shash_export(shash, &ctx->sha1))
 157                        return -EFAULT;
 158                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
 159                        *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
 160                break;
 161        case ICP_QAT_HW_AUTH_ALGO_SHA256:
 162                if (crypto_shash_export(shash, &ctx->sha256))
 163                        return -EFAULT;
 164                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
 165                        *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
 166                break;
 167        case ICP_QAT_HW_AUTH_ALGO_SHA512:
 168                if (crypto_shash_export(shash, &ctx->sha512))
 169                        return -EFAULT;
 170                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
 171                        *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
 172                break;
 173        default:
 174                return -EFAULT;
 175        }
 176
 177        if (crypto_shash_init(shash))
 178                return -EFAULT;
 179
 180        if (crypto_shash_update(shash, ctx->opad, block_size))
 181                return -EFAULT;
 182
 183        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
 184        if (offset < 0)
 185                return -EFAULT;
 186
 187        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
 188        hash512_state_out = (__be64 *)hash_state_out;
 189
 190        switch (ctx->qat_hash_alg) {
 191        case ICP_QAT_HW_AUTH_ALGO_SHA1:
 192                if (crypto_shash_export(shash, &ctx->sha1))
 193                        return -EFAULT;
 194                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
 195                        *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
 196                break;
 197        case ICP_QAT_HW_AUTH_ALGO_SHA256:
 198                if (crypto_shash_export(shash, &ctx->sha256))
 199                        return -EFAULT;
 200                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
 201                        *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
 202                break;
 203        case ICP_QAT_HW_AUTH_ALGO_SHA512:
 204                if (crypto_shash_export(shash, &ctx->sha512))
 205                        return -EFAULT;
 206                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
 207                        *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
 208                break;
 209        default:
 210                return -EFAULT;
 211        }
 212        memzero_explicit(ctx->ipad, block_size);
 213        memzero_explicit(ctx->opad, block_size);
 214        return 0;
 215}
 216
 217static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
 218{
 219        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
 220                                           ICP_QAT_FW_CIPH_IV_64BIT_PTR);
 221        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
 222                                       ICP_QAT_FW_LA_UPDATE_STATE);
 223}
 224
 225static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
 226{
 227        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
 228                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
 229        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
 230                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 231}
 232
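/*
 * Fill in the request header fields shared by all lookaside (LA) requests:
 * 64-bit content descriptor pointers, SGL buffer descriptors, no partials
 * and, for AEAD, no IV/state update between requests.
 */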
 233static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
 234                                    int aead)
 235{
 236        header->hdr_flags =
 237                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
 238        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
 239        header->comn_req_flags =
 240                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
 241                                            QAT_COMN_PTR_TYPE_SGL);
 242        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
 243                                  ICP_QAT_FW_LA_PARTIAL_NONE);
 244        if (aead)
 245                qat_alg_init_hdr_no_iv_updt(header);
 246        else
 247                qat_alg_init_hdr_iv_updt(header);
 248        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
 249                                ICP_QAT_FW_LA_NO_PROTO);
 250}
 251
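/*
 * Build the encrypt-direction content descriptor (cipher block followed by
 * auth block) and the corresponding CIPHER_HASH firmware request template.
 */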
 252static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
 253                                         int alg,
 254                                         struct crypto_authenc_keys *keys,
 255                                         int mode)
 256{
 257        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
 258        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
 259        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
 260        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
 261        struct icp_qat_hw_auth_algo_blk *hash =
 262                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
 263                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
 264        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
 265        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
 266        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
 267        void *ptr = &req_tmpl->cd_ctrl;
 268        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 269        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
 270
 271        /* CD setup */
 272        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
 273        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
 274        hash->sha.inner_setup.auth_config.config =
 275                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
 276                                             ctx->qat_hash_alg, digestsize);
 277        hash->sha.inner_setup.auth_counter.counter =
 278                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
 279
 280        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
 281                return -EFAULT;
 282
 283        /* Request setup */
 284        qat_alg_init_common_hdr(header, 1);
 285        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 286        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 287                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
 288        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
 289                                   ICP_QAT_FW_LA_RET_AUTH_RES);
 290        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
 291                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
 292        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
 293        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
 294
 295        /* Cipher CD config setup */
 296        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
 297        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
 298        cipher_cd_ctrl->cipher_cfg_offset = 0;
 299        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
 300        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
 301        /* Auth CD config setup */
 302        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
 303        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
 304        hash_cd_ctrl->inner_res_sz = digestsize;
 305        hash_cd_ctrl->final_sz = digestsize;
 306
 307        switch (ctx->qat_hash_alg) {
 308        case ICP_QAT_HW_AUTH_ALGO_SHA1:
 309                hash_cd_ctrl->inner_state1_sz =
 310                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
 311                hash_cd_ctrl->inner_state2_sz =
 312                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
 313                break;
 314        case ICP_QAT_HW_AUTH_ALGO_SHA256:
 315                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
 316                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
 317                break;
 318        case ICP_QAT_HW_AUTH_ALGO_SHA512:
 319                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
 320                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
 321                break;
 322        default:
 323                break;
 324        }
 325        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
 326                        ((sizeof(struct icp_qat_hw_auth_setup) +
 327                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
 328        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
 329        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
 330        return 0;
 331}
 332
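/*
 * Build the decrypt-direction content descriptor (auth block followed by
 * cipher block) and the corresponding HASH_CIPHER firmware request template,
 * with the digest verified by the firmware.
 */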
 333static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
 334                                         int alg,
 335                                         struct crypto_authenc_keys *keys,
 336                                         int mode)
 337{
 338        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
 339        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
 340        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
 341        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
 342        struct icp_qat_hw_cipher_algo_blk *cipher =
 343                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
 344                sizeof(struct icp_qat_hw_auth_setup) +
 345                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
 346        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
 347        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
 348        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
 349        void *ptr = &req_tmpl->cd_ctrl;
 350        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 351        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
 352        struct icp_qat_fw_la_auth_req_params *auth_param =
 353                (struct icp_qat_fw_la_auth_req_params *)
 354                ((char *)&req_tmpl->serv_specif_rqpars +
 355                sizeof(struct icp_qat_fw_la_cipher_req_params));
 356
 357        /* CD setup */
 358        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
 359        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
 360        hash->sha.inner_setup.auth_config.config =
 361                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
 362                                             ctx->qat_hash_alg,
 363                                             digestsize);
 364        hash->sha.inner_setup.auth_counter.counter =
 365                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
 366
 367        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
 368                return -EFAULT;
 369
 370        /* Request setup */
 371        qat_alg_init_common_hdr(header, 1);
 372        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
 373        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 374                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
 375        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
 376                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
 377        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
 378                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
 379        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
 380        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
 381
 382        /* Cipher CD config setup */
 383        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
 384        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
 385        cipher_cd_ctrl->cipher_cfg_offset =
 386                (sizeof(struct icp_qat_hw_auth_setup) +
 387                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
 388        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
 389        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
 390
 391        /* Auth CD config setup */
 392        hash_cd_ctrl->hash_cfg_offset = 0;
 393        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
 394        hash_cd_ctrl->inner_res_sz = digestsize;
 395        hash_cd_ctrl->final_sz = digestsize;
 396
 397        switch (ctx->qat_hash_alg) {
 398        case ICP_QAT_HW_AUTH_ALGO_SHA1:
 399                hash_cd_ctrl->inner_state1_sz =
 400                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
 401                hash_cd_ctrl->inner_state2_sz =
 402                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
 403                break;
 404        case ICP_QAT_HW_AUTH_ALGO_SHA256:
 405                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
 406                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
 407                break;
 408        case ICP_QAT_HW_AUTH_ALGO_SHA512:
 409                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
 410                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
 411                break;
 412        default:
 413                break;
 414        }
 415
 416        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
 417                        ((sizeof(struct icp_qat_hw_auth_setup) +
 418                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
 419        auth_param->auth_res_sz = digestsize;
 420        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
 421        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
 422        return 0;
 423}
 424
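/* Set up the parts of a cipher-only request template common to both directions */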
 425static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
 426                                      struct icp_qat_fw_la_bulk_req *req,
 427                                      struct icp_qat_hw_cipher_algo_blk *cd,
 428                                      const u8 *key, unsigned int keylen)
 429{
 430        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
 431        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
 432        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
 433
 434        memcpy(cd->aes.key, key, keylen);
 435        qat_alg_init_common_hdr(header, 0);
 436        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
 437        cd_pars->u.s.content_desc_params_sz =
 438                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
 439        /* Cipher CD config setup */
 440        cd_ctrl->cipher_key_sz = keylen >> 3;
 441        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
 442        cd_ctrl->cipher_cfg_offset = 0;
 443        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
 444        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
 445}
 446
 447static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
 448                                      int alg, const u8 *key,
 449                                      unsigned int keylen, int mode)
 450{
 451        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
 452        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
 453        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
 454
 455        qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
 456        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
 457        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
 458}
 459
 460static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
 461                                      int alg, const u8 *key,
 462                                      unsigned int keylen, int mode)
 463{
 464        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
 465        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
 466        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
 467
 468        qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
 469        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
 470
 471        if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
 472                dec_cd->aes.cipher_config.val =
 473                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
 474        else
 475                dec_cd->aes.cipher_config.val =
 476                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
 477}
 478
 479static int qat_alg_validate_key(int key_len, int *alg, int mode)
 480{
 481        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
 482                switch (key_len) {
 483                case AES_KEYSIZE_128:
 484                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
 485                        break;
 486                case AES_KEYSIZE_192:
 487                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
 488                        break;
 489                case AES_KEYSIZE_256:
 490                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
 491                        break;
 492                default:
 493                        return -EINVAL;
 494                }
 495        } else {
 496                switch (key_len) {
 497                case AES_KEYSIZE_128 << 1:
 498                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
 499                        break;
 500                case AES_KEYSIZE_256 << 1:
 501                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
 502                        break;
 503                default:
 504                        return -EINVAL;
 505                }
 506        }
 507        return 0;
 508}
 509
 510static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
 511                                      unsigned int keylen,  int mode)
 512{
 513        struct crypto_authenc_keys keys;
 514        int alg;
 515
 516        if (crypto_authenc_extractkeys(&keys, key, keylen))
 517                goto bad_key;
 518
 519        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
 520                goto bad_key;
 521
 522        if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
 523                goto error;
 524
 525        if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
 526                goto error;
 527
 528        memzero_explicit(&keys, sizeof(keys));
 529        return 0;
 530bad_key:
 531        memzero_explicit(&keys, sizeof(keys));
 532        return -EINVAL;
 533error:
 534        memzero_explicit(&keys, sizeof(keys));
 535        return -EFAULT;
 536}
 537
 538static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
 539                                          const u8 *key,
 540                                          unsigned int keylen,
 541                                          int mode)
 542{
 543        int alg;
 544
 545        if (qat_alg_validate_key(keylen, &alg, mode))
 546                return -EINVAL;
 547
 548        qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
 549        qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
 550        return 0;
 551}
 552
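/*
 * Rekey an already initialised tfm: the DMA-coherent content descriptors are
 * reused, so only their contents and the request templates are rebuilt.
 */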
 553static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
 554                              unsigned int keylen)
 555{
 556        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 557
 558        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
 559        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
 560        memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
 561        memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
 562
 563        return qat_alg_aead_init_sessions(tfm, key, keylen,
 564                                          ICP_QAT_HW_CIPHER_CBC_MODE);
 565}
 566
 567static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
 568                               unsigned int keylen)
 569{
 570        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 571        struct qat_crypto_instance *inst = NULL;
 572        int node = get_current_node();
 573        struct device *dev;
 574        int ret;
 575
 576        inst = qat_crypto_get_instance_node(node);
 577        if (!inst)
 578                return -EINVAL;
 579        dev = &GET_DEV(inst->accel_dev);
 580        ctx->inst = inst;
 581        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
 582                                         &ctx->enc_cd_paddr,
 583                                         GFP_ATOMIC);
 584        if (!ctx->enc_cd) {
 585                ret = -ENOMEM;
 586                goto out_free_inst;
 587        }
 588        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
 589                                         &ctx->dec_cd_paddr,
 590                                         GFP_ATOMIC);
 591        if (!ctx->dec_cd) {
 592                ret = -ENOMEM;
 593                goto out_free_enc;
 594        }
 595
 596        ret = qat_alg_aead_init_sessions(tfm, key, keylen,
 597                                         ICP_QAT_HW_CIPHER_CBC_MODE);
 598        if (ret)
 599                goto out_free_all;
 600
 601        return 0;
 602
 603out_free_all:
 604        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
 605        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 606                          ctx->dec_cd, ctx->dec_cd_paddr);
 607        ctx->dec_cd = NULL;
 608out_free_enc:
 609        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
 610        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 611                          ctx->enc_cd, ctx->enc_cd_paddr);
 612        ctx->enc_cd = NULL;
 613out_free_inst:
 614        ctx->inst = NULL;
 615        qat_crypto_put_instance(inst);
 616        return ret;
 617}
 618
 619static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 620                               unsigned int keylen)
 621{
 622        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 623
 624        if (ctx->enc_cd)
 625                return qat_alg_aead_rekey(tfm, key, keylen);
 626        else
 627                return qat_alg_aead_newkey(tfm, key, keylen);
 628}
 629
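/* Unmap and free the buffer lists built by qat_alg_sgl_to_bufl() */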
 630static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 631                              struct qat_crypto_request *qat_req)
 632{
 633        struct device *dev = &GET_DEV(inst->accel_dev);
 634        struct qat_alg_buf_list *bl = qat_req->buf.bl;
 635        struct qat_alg_buf_list *blout = qat_req->buf.blout;
 636        dma_addr_t blp = qat_req->buf.blp;
 637        dma_addr_t blpout = qat_req->buf.bloutp;
 638        size_t sz = qat_req->buf.sz;
 639        size_t sz_out = qat_req->buf.sz_out;
 640        int i;
 641
 642        for (i = 0; i < bl->num_bufs; i++)
 643                dma_unmap_single(dev, bl->bufers[i].addr,
 644                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);
 645
 646        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 647        kfree(bl);
 648        if (blp != blpout) {
  649                /* For an out-of-place operation, DMA unmap only the data buffers */
 650                int bufless = blout->num_bufs - blout->num_mapped_bufs;
 651
 652                for (i = bufless; i < blout->num_bufs; i++) {
 653                        dma_unmap_single(dev, blout->bufers[i].addr,
 654                                         blout->bufers[i].len,
 655                                         DMA_BIDIRECTIONAL);
 656                }
 657                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
 658                kfree(blout);
 659        }
 660}
 661
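/*
 * Flatten the source and destination scatterlists into DMA-mapped
 * qat_alg_buf_list structures that the firmware can walk. For in-place
 * operations the source list is reused as the destination.
 */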
 662static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 663                               struct scatterlist *sgl,
 664                               struct scatterlist *sglout,
 665                               struct qat_crypto_request *qat_req)
 666{
 667        struct device *dev = &GET_DEV(inst->accel_dev);
 668        int i, sg_nctr = 0;
 669        int n = sg_nents(sgl);
 670        struct qat_alg_buf_list *bufl;
 671        struct qat_alg_buf_list *buflout = NULL;
 672        dma_addr_t blp;
 673        dma_addr_t bloutp = 0;
 674        struct scatterlist *sg;
 675        size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
 676
 677        if (unlikely(!n))
 678                return -EINVAL;
 679
 680        bufl = kzalloc_node(sz, GFP_ATOMIC,
 681                            dev_to_node(&GET_DEV(inst->accel_dev)));
 682        if (unlikely(!bufl))
 683                return -ENOMEM;
 684
 685        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
 686        if (unlikely(dma_mapping_error(dev, blp)))
 687                goto err_in;
 688
 689        for_each_sg(sgl, sg, n, i) {
 690                int y = sg_nctr;
 691
 692                if (!sg->length)
 693                        continue;
 694
 695                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
 696                                                      sg->length,
 697                                                      DMA_BIDIRECTIONAL);
 698                bufl->bufers[y].len = sg->length;
 699                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
 700                        goto err_in;
 701                sg_nctr++;
 702        }
 703        bufl->num_bufs = sg_nctr;
 704        qat_req->buf.bl = bufl;
 705        qat_req->buf.blp = blp;
 706        qat_req->buf.sz = sz;
 707        /* Handle out of place operation */
 708        if (sgl != sglout) {
 709                struct qat_alg_buf *bufers;
 710
 711                n = sg_nents(sglout);
 712                sz_out = struct_size(buflout, bufers, n + 1);
 713                sg_nctr = 0;
 714                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
 715                                       dev_to_node(&GET_DEV(inst->accel_dev)));
 716                if (unlikely(!buflout))
 717                        goto err_in;
 718                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
 719                if (unlikely(dma_mapping_error(dev, bloutp)))
 720                        goto err_out;
 721                bufers = buflout->bufers;
 722                for_each_sg(sglout, sg, n, i) {
 723                        int y = sg_nctr;
 724
 725                        if (!sg->length)
 726                                continue;
 727
 728                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
 729                                                        sg->length,
 730                                                        DMA_BIDIRECTIONAL);
 731                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
 732                                goto err_out;
 733                        bufers[y].len = sg->length;
 734                        sg_nctr++;
 735                }
 736                buflout->num_bufs = sg_nctr;
 737                buflout->num_mapped_bufs = sg_nctr;
 738                qat_req->buf.blout = buflout;
 739                qat_req->buf.bloutp = bloutp;
 740                qat_req->buf.sz_out = sz_out;
 741        } else {
 742                /* Otherwise set the src and dst to the same address */
 743                qat_req->buf.bloutp = qat_req->buf.blp;
 744                qat_req->buf.sz_out = 0;
 745        }
 746        return 0;
 747
 748err_out:
 749        n = sg_nents(sglout);
 750        for (i = 0; i < n; i++)
 751                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 752                        dma_unmap_single(dev, buflout->bufers[i].addr,
 753                                         buflout->bufers[i].len,
 754                                         DMA_BIDIRECTIONAL);
 755        if (!dma_mapping_error(dev, bloutp))
 756                dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
 757        kfree(buflout);
 758
 759err_in:
 760        n = sg_nents(sgl);
 761        for (i = 0; i < n; i++)
 762                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
 763                        dma_unmap_single(dev, bufl->bufers[i].addr,
 764                                         bufl->bufers[i].len,
 765                                         DMA_BIDIRECTIONAL);
 766
 767        if (!dma_mapping_error(dev, blp))
 768                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 769        kfree(bufl);
 770
 771        dev_err(dev, "Failed to map buf for dma\n");
 772        return -ENOMEM;
 773}
 774
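/* Completion handler for AEAD requests, invoked from qat_alg_callback() */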
 775static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 776                                  struct qat_crypto_request *qat_req)
 777{
 778        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
 779        struct qat_crypto_instance *inst = ctx->inst;
 780        struct aead_request *areq = qat_req->aead_req;
  781        u8 stat_field = qat_resp->comn_resp.comn_status;
  782        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
 783
 784        qat_alg_free_bufl(inst, qat_req);
 785        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
 786                res = -EBADMSG;
 787        areq->base.complete(&areq->base, res);
 788}
 789
 790static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 791                                      struct qat_crypto_request *qat_req)
 792{
 793        struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
 794        struct qat_crypto_instance *inst = ctx->inst;
 795        struct skcipher_request *sreq = qat_req->skcipher_req;
  796        u8 stat_field = qat_resp->comn_resp.comn_status;
  797        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
  798        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
 799
 800        qat_alg_free_bufl(inst, qat_req);
 801        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
 802                res = -EINVAL;
 803
 804        memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
 805        dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
 806                          qat_req->iv_paddr);
 807
 808        sreq->base.complete(&sreq->base, res);
 809}
 810
 811void qat_alg_callback(void *resp)
 812{
 813        struct icp_qat_fw_la_resp *qat_resp = resp;
 814        struct qat_crypto_request *qat_req =
 815                                (void *)(__force long)qat_resp->opaque_data;
 816
 817        qat_req->cb(qat_resp, qat_req);
 818}
 819
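/*
 * Map the request buffers, fill a bulk request from the decrypt template and
 * post it to the symmetric ring, retrying up to 10 times on -EAGAIN.
 */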
 820static int qat_alg_aead_dec(struct aead_request *areq)
 821{
 822        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
 823        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
 824        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
 825        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
 826        struct icp_qat_fw_la_cipher_req_params *cipher_param;
 827        struct icp_qat_fw_la_auth_req_params *auth_param;
 828        struct icp_qat_fw_la_bulk_req *msg;
 829        int digst_size = crypto_aead_authsize(aead_tfm);
 830        int ret, ctr = 0;
 831
 832        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
 833        if (unlikely(ret))
 834                return ret;
 835
 836        msg = &qat_req->req;
 837        *msg = ctx->dec_fw_req;
 838        qat_req->aead_ctx = ctx;
 839        qat_req->aead_req = areq;
 840        qat_req->cb = qat_aead_alg_callback;
 841        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 842        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 843        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
 844        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 845        cipher_param->cipher_length = areq->cryptlen - digst_size;
 846        cipher_param->cipher_offset = areq->assoclen;
 847        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
 848        auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
 849        auth_param->auth_off = 0;
 850        auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
 851        do {
 852                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 853        } while (ret == -EAGAIN && ctr++ < 10);
 854
 855        if (ret == -EAGAIN) {
 856                qat_alg_free_bufl(ctx->inst, qat_req);
 857                return -EBUSY;
 858        }
 859        return -EINPROGRESS;
 860}
 861
 862static int qat_alg_aead_enc(struct aead_request *areq)
 863{
 864        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
 865        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
 866        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
 867        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
 868        struct icp_qat_fw_la_cipher_req_params *cipher_param;
 869        struct icp_qat_fw_la_auth_req_params *auth_param;
 870        struct icp_qat_fw_la_bulk_req *msg;
 871        u8 *iv = areq->iv;
 872        int ret, ctr = 0;
 873
 874        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
 875        if (unlikely(ret))
 876                return ret;
 877
 878        msg = &qat_req->req;
 879        *msg = ctx->enc_fw_req;
 880        qat_req->aead_ctx = ctx;
 881        qat_req->aead_req = areq;
 882        qat_req->cb = qat_aead_alg_callback;
 883        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 884        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 885        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
 886        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 887        auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
 888
 889        memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
 890        cipher_param->cipher_length = areq->cryptlen;
 891        cipher_param->cipher_offset = areq->assoclen;
 892
 893        auth_param->auth_off = 0;
 894        auth_param->auth_len = areq->assoclen + areq->cryptlen;
 895
 896        do {
 897                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 898        } while (ret == -EAGAIN && ctr++ < 10);
 899
 900        if (ret == -EAGAIN) {
 901                qat_alg_free_bufl(ctx->inst, qat_req);
 902                return -EBUSY;
 903        }
 904        return -EINPROGRESS;
 905}
 906
 907static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
 908                                  const u8 *key, unsigned int keylen,
 909                                  int mode)
 910{
 911        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
 912        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
 913        memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
 914        memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
 915
 916        return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
 917}
 918
 919static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
 920                                   const u8 *key, unsigned int keylen,
 921                                   int mode)
 922{
 923        struct qat_crypto_instance *inst = NULL;
 924        struct device *dev;
 925        int node = get_current_node();
 926        int ret;
 927
 928        inst = qat_crypto_get_instance_node(node);
 929        if (!inst)
 930                return -EINVAL;
 931        dev = &GET_DEV(inst->accel_dev);
 932        ctx->inst = inst;
 933        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
 934                                         &ctx->enc_cd_paddr,
 935                                         GFP_ATOMIC);
 936        if (!ctx->enc_cd) {
 937                ret = -ENOMEM;
 938                goto out_free_instance;
 939        }
 940        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
 941                                         &ctx->dec_cd_paddr,
 942                                         GFP_ATOMIC);
 943        if (!ctx->dec_cd) {
 944                ret = -ENOMEM;
 945                goto out_free_enc;
 946        }
 947
 948        ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
 949        if (ret)
 950                goto out_free_all;
 951
 952        return 0;
 953
 954out_free_all:
 955        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
 956        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
 957                          ctx->dec_cd, ctx->dec_cd_paddr);
 958        ctx->dec_cd = NULL;
 959out_free_enc:
 960        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
 961        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
 962                          ctx->enc_cd, ctx->enc_cd_paddr);
 963        ctx->enc_cd = NULL;
 964out_free_instance:
 965        ctx->inst = NULL;
 966        qat_crypto_put_instance(inst);
 967        return ret;
 968}
 969
 970static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
 971                                   const u8 *key, unsigned int keylen,
 972                                   int mode)
 973{
 974        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 975
 976        if (ctx->enc_cd)
 977                return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
 978        else
 979                return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
 980}
 981
 982static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
 983                                       const u8 *key, unsigned int keylen)
 984{
 985        return qat_alg_skcipher_setkey(tfm, key, keylen,
 986                                       ICP_QAT_HW_CIPHER_CBC_MODE);
 987}
 988
 989static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
 990                                       const u8 *key, unsigned int keylen)
 991{
 992        return qat_alg_skcipher_setkey(tfm, key, keylen,
 993                                       ICP_QAT_HW_CIPHER_CTR_MODE);
 994}
 995
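/*
 * XTS keys of 2 * AES_KEYSIZE_192 are not handled by the device path (see
 * qat_alg_validate_key()), so they are programmed into the fallback tfm.
 */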
 996static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
 997                                       const u8 *key, unsigned int keylen)
 998{
 999        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1000        int ret;
1001
1002        ret = xts_verify_key(tfm, key, keylen);
1003        if (ret)
1004                return ret;
1005
1006        if (keylen >> 1 == AES_KEYSIZE_192) {
1007                ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1008                if (ret)
1009                        return ret;
1010
1011                ctx->fallback = true;
1012
1013                return 0;
1014        }
1015
1016        ctx->fallback = false;
1017
1018        return qat_alg_skcipher_setkey(tfm, key, keylen,
1019                                       ICP_QAT_HW_CIPHER_XTS_MODE);
1020}
1021
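/*
 * Allocate a DMA-coherent IV buffer, map the request buffers and post the
 * encrypt request to the symmetric ring; the IV is copied back to the
 * request on completion.
 */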
1022static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1023{
1024        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1025        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1026        struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1027        struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1028        struct icp_qat_fw_la_cipher_req_params *cipher_param;
1029        struct icp_qat_fw_la_bulk_req *msg;
1030        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1031        int ret, ctr = 0;
1032
1033        if (req->cryptlen == 0)
1034                return 0;
1035
1036        qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1037                                         &qat_req->iv_paddr, GFP_ATOMIC);
1038        if (!qat_req->iv)
1039                return -ENOMEM;
1040
1041        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1042        if (unlikely(ret)) {
1043                dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1044                                  qat_req->iv_paddr);
1045                return ret;
1046        }
1047
1048        msg = &qat_req->req;
1049        *msg = ctx->enc_fw_req;
1050        qat_req->skcipher_ctx = ctx;
1051        qat_req->skcipher_req = req;
1052        qat_req->cb = qat_skcipher_alg_callback;
1053        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1054        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1055        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1056        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1057        cipher_param->cipher_length = req->cryptlen;
1058        cipher_param->cipher_offset = 0;
1059        cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1060        memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1061        do {
1062                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1063        } while (ret == -EAGAIN && ctr++ < 10);
1064
1065        if (ret == -EAGAIN) {
1066                qat_alg_free_bufl(ctx->inst, qat_req);
1067                dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1068                                  qat_req->iv_paddr);
1069                return -EBUSY;
1070        }
1071        return -EINPROGRESS;
1072}
1073
1074static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1075{
1076        if (req->cryptlen % AES_BLOCK_SIZE != 0)
1077                return -EINVAL;
1078
1079        return qat_alg_skcipher_encrypt(req);
1080}
1081
1082static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1083{
1084        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1085        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1086        struct skcipher_request *nreq = skcipher_request_ctx(req);
1087
1088        if (req->cryptlen < XTS_BLOCK_SIZE)
1089                return -EINVAL;
1090
1091        if (ctx->fallback) {
1092                memcpy(nreq, req, sizeof(*req));
1093                skcipher_request_set_tfm(nreq, ctx->ftfm);
1094                return crypto_skcipher_encrypt(nreq);
1095        }
1096
1097        return qat_alg_skcipher_encrypt(req);
1098}
1099
1100static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1101{
1102        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1103        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1104        struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1105        struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1106        struct icp_qat_fw_la_cipher_req_params *cipher_param;
1107        struct icp_qat_fw_la_bulk_req *msg;
1108        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1109        int ret, ctr = 0;
1110
1111        if (req->cryptlen == 0)
1112                return 0;
1113
1114        qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1115                                         &qat_req->iv_paddr, GFP_ATOMIC);
1116        if (!qat_req->iv)
1117                return -ENOMEM;
1118
1119        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1120        if (unlikely(ret)) {
1121                dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1122                                  qat_req->iv_paddr);
1123                return ret;
1124        }
1125
1126        msg = &qat_req->req;
1127        *msg = ctx->dec_fw_req;
1128        qat_req->skcipher_ctx = ctx;
1129        qat_req->skcipher_req = req;
1130        qat_req->cb = qat_skcipher_alg_callback;
1131        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1132        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1133        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1134        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1135        cipher_param->cipher_length = req->cryptlen;
1136        cipher_param->cipher_offset = 0;
1137        cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1138        memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1139        do {
1140                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1141        } while (ret == -EAGAIN && ctr++ < 10);
1142
1143        if (ret == -EAGAIN) {
1144                qat_alg_free_bufl(ctx->inst, qat_req);
1145                dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1146                                  qat_req->iv_paddr);
1147                return -EBUSY;
1148        }
1149        return -EINPROGRESS;
1150}
1151
1152static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1153{
1154        if (req->cryptlen % AES_BLOCK_SIZE != 0)
1155                return -EINVAL;
1156
1157        return qat_alg_skcipher_decrypt(req);
1158}
1159
1160static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1161{
1162        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1163        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1164        struct skcipher_request *nreq = skcipher_request_ctx(req);
1165
1166        if (req->cryptlen < XTS_BLOCK_SIZE)
1167                return -EINVAL;
1168
1169        if (ctx->fallback) {
1170                memcpy(nreq, req, sizeof(*req));
1171                skcipher_request_set_tfm(nreq, ctx->ftfm);
1172                return crypto_skcipher_decrypt(nreq);
1173        }
1174
1175        return qat_alg_skcipher_decrypt(req);
1176}
1177
1178static int qat_alg_aead_init(struct crypto_aead *tfm,
1179                             enum icp_qat_hw_auth_algo hash,
1180                             const char *hash_name)
1181{
1182        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1183
1184        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1185        if (IS_ERR(ctx->hash_tfm))
1186                return PTR_ERR(ctx->hash_tfm);
1187        ctx->qat_hash_alg = hash;
1188        crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1189        return 0;
1190}
1191
1192static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1193{
1194        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1195}
1196
1197static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1198{
1199        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1200}
1201
1202static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1203{
1204        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1205}
1206
1207static void qat_alg_aead_exit(struct crypto_aead *tfm)
1208{
1209        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1210        struct qat_crypto_instance *inst = ctx->inst;
1211        struct device *dev;
1212
1213        crypto_free_shash(ctx->hash_tfm);
1214
1215        if (!inst)
1216                return;
1217
1218        dev = &GET_DEV(inst->accel_dev);
1219        if (ctx->enc_cd) {
1220                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1221                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1222                                  ctx->enc_cd, ctx->enc_cd_paddr);
1223        }
1224        if (ctx->dec_cd) {
1225                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1226                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1227                                  ctx->dec_cd, ctx->dec_cd_paddr);
1228        }
1229        qat_crypto_put_instance(inst);
1230}
1231
1232static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1233{
1234        crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1235        return 0;
1236}
1237
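/* Allocate the xts(aes) fallback tfm and size the request context to fit it */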
1238static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1239{
1240        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1241        int reqsize;
1242
1243        ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1244                                          CRYPTO_ALG_NEED_FALLBACK);
1245        if (IS_ERR(ctx->ftfm))
1246                return PTR_ERR(ctx->ftfm);
1247
1248        reqsize = max(sizeof(struct qat_crypto_request),
1249                      sizeof(struct skcipher_request) +
1250                      crypto_skcipher_reqsize(ctx->ftfm));
1251        crypto_skcipher_set_reqsize(tfm, reqsize);
1252
1253        return 0;
1254}
1255
1256static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1257{
1258        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1259        struct qat_crypto_instance *inst = ctx->inst;
1260        struct device *dev;
1261
1262        if (!inst)
1263                return;
1264
1265        dev = &GET_DEV(inst->accel_dev);
1266        if (ctx->enc_cd) {
1267                memset(ctx->enc_cd, 0,
1268                       sizeof(struct icp_qat_hw_cipher_algo_blk));
1269                dma_free_coherent(dev,
1270                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
1271                                  ctx->enc_cd, ctx->enc_cd_paddr);
1272        }
1273        if (ctx->dec_cd) {
1274                memset(ctx->dec_cd, 0,
1275                       sizeof(struct icp_qat_hw_cipher_algo_blk));
1276                dma_free_coherent(dev,
1277                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
1278                                  ctx->dec_cd, ctx->dec_cd_paddr);
1279        }
1280        qat_crypto_put_instance(inst);
1281}
1282
1283static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1284{
1285        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1286
1287        if (ctx->ftfm)
1288                crypto_free_skcipher(ctx->ftfm);
1289
1290        qat_alg_skcipher_exit_tfm(tfm);
1291}
1292
1293static struct aead_alg qat_aeads[] = { {
1294        .base = {
1295                .cra_name = "authenc(hmac(sha1),cbc(aes))",
1296                .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1297                .cra_priority = 4001,
1298                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1299                .cra_blocksize = AES_BLOCK_SIZE,
1300                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1301                .cra_module = THIS_MODULE,
1302        },
1303        .init = qat_alg_aead_sha1_init,
1304        .exit = qat_alg_aead_exit,
1305        .setkey = qat_alg_aead_setkey,
1306        .decrypt = qat_alg_aead_dec,
1307        .encrypt = qat_alg_aead_enc,
1308        .ivsize = AES_BLOCK_SIZE,
1309        .maxauthsize = SHA1_DIGEST_SIZE,
1310}, {
1311        .base = {
1312                .cra_name = "authenc(hmac(sha256),cbc(aes))",
1313                .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1314                .cra_priority = 4001,
1315                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1316                .cra_blocksize = AES_BLOCK_SIZE,
1317                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1318                .cra_module = THIS_MODULE,
1319        },
1320        .init = qat_alg_aead_sha256_init,
1321        .exit = qat_alg_aead_exit,
1322        .setkey = qat_alg_aead_setkey,
1323        .decrypt = qat_alg_aead_dec,
1324        .encrypt = qat_alg_aead_enc,
1325        .ivsize = AES_BLOCK_SIZE,
1326        .maxauthsize = SHA256_DIGEST_SIZE,
1327}, {
1328        .base = {
1329                .cra_name = "authenc(hmac(sha512),cbc(aes))",
1330                .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1331                .cra_priority = 4001,
1332                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1333                .cra_blocksize = AES_BLOCK_SIZE,
1334                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1335                .cra_module = THIS_MODULE,
1336        },
1337        .init = qat_alg_aead_sha512_init,
1338        .exit = qat_alg_aead_exit,
1339        .setkey = qat_alg_aead_setkey,
1340        .decrypt = qat_alg_aead_dec,
1341        .encrypt = qat_alg_aead_enc,
1342        .ivsize = AES_BLOCK_SIZE,
1343        .maxauthsize = SHA512_DIGEST_SIZE,
1344} };
1345
1346static struct skcipher_alg qat_skciphers[] = { {
1347        .base.cra_name = "cbc(aes)",
1348        .base.cra_driver_name = "qat_aes_cbc",
1349        .base.cra_priority = 4001,
1350        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1351        .base.cra_blocksize = AES_BLOCK_SIZE,
1352        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1353        .base.cra_alignmask = 0,
1354        .base.cra_module = THIS_MODULE,
1355
1356        .init = qat_alg_skcipher_init_tfm,
1357        .exit = qat_alg_skcipher_exit_tfm,
1358        .setkey = qat_alg_skcipher_cbc_setkey,
1359        .decrypt = qat_alg_skcipher_blk_decrypt,
1360        .encrypt = qat_alg_skcipher_blk_encrypt,
1361        .min_keysize = AES_MIN_KEY_SIZE,
1362        .max_keysize = AES_MAX_KEY_SIZE,
1363        .ivsize = AES_BLOCK_SIZE,
1364}, {
1365        .base.cra_name = "ctr(aes)",
1366        .base.cra_driver_name = "qat_aes_ctr",
1367        .base.cra_priority = 4001,
1368        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1369        .base.cra_blocksize = 1,
1370        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1371        .base.cra_alignmask = 0,
1372        .base.cra_module = THIS_MODULE,
1373
1374        .init = qat_alg_skcipher_init_tfm,
1375        .exit = qat_alg_skcipher_exit_tfm,
1376        .setkey = qat_alg_skcipher_ctr_setkey,
1377        .decrypt = qat_alg_skcipher_decrypt,
1378        .encrypt = qat_alg_skcipher_encrypt,
1379        .min_keysize = AES_MIN_KEY_SIZE,
1380        .max_keysize = AES_MAX_KEY_SIZE,
1381        .ivsize = AES_BLOCK_SIZE,
1382}, {
1383        .base.cra_name = "xts(aes)",
1384        .base.cra_driver_name = "qat_aes_xts",
1385        .base.cra_priority = 4001,
1386        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1387                          CRYPTO_ALG_ALLOCATES_MEMORY,
1388        .base.cra_blocksize = AES_BLOCK_SIZE,
1389        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1390        .base.cra_alignmask = 0,
1391        .base.cra_module = THIS_MODULE,
1392
1393        .init = qat_alg_skcipher_init_xts_tfm,
1394        .exit = qat_alg_skcipher_exit_xts_tfm,
1395        .setkey = qat_alg_skcipher_xts_setkey,
1396        .decrypt = qat_alg_skcipher_xts_decrypt,
1397        .encrypt = qat_alg_skcipher_xts_encrypt,
1398        .min_keysize = 2 * AES_MIN_KEY_SIZE,
1399        .max_keysize = 2 * AES_MAX_KEY_SIZE,
1400        .ivsize = AES_BLOCK_SIZE,
1401} };
1402
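/*
 * Register/unregister the algorithms with the crypto API only when the first
 * device comes up and the last one goes away; active_devs keeps the count
 * under algs_lock.
 */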
1403int qat_algs_register(void)
1404{
1405        int ret = 0;
1406
1407        mutex_lock(&algs_lock);
1408        if (++active_devs != 1)
1409                goto unlock;
1410
1411        ret = crypto_register_skciphers(qat_skciphers,
1412                                        ARRAY_SIZE(qat_skciphers));
1413        if (ret)
1414                goto unlock;
1415
1416        ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1417        if (ret)
1418                goto unreg_algs;
1419
1420unlock:
1421        mutex_unlock(&algs_lock);
1422        return ret;
1423
1424unreg_algs:
1425        crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1426        goto unlock;
1427}
1428
1429void qat_algs_unregister(void)
1430{
1431        mutex_lock(&algs_lock);
1432        if (--active_devs != 0)
1433                goto unlock;
1434
1435        crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1436        crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1437
1438unlock:
1439        mutex_unlock(&algs_lock);
1440}
1441