linux/drivers/crypto/qat/qat_common/qat_algs.c
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

#define HW_CAP_AES_V2(accel_dev) \
        (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
         ICP_ACCEL_CAPABILITIES_AES_V2)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

struct qat_alg_aead_ctx {
        struct qat_alg_cd *enc_cd;
        struct qat_alg_cd *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
        union {
                struct sha1_state sha1;
                struct sha256_state sha256;
                struct sha512_state sha512;
        };
        char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
        char opad[SHA512_BLOCK_SIZE];
};

struct qat_alg_skcipher_ctx {
        struct icp_qat_hw_cipher_algo_blk *enc_cd;
        struct icp_qat_hw_cipher_algo_blk *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct qat_crypto_instance *inst;
        struct crypto_skcipher *ftfm;
        struct crypto_cipher *tweak;
        bool fallback;
        int mode;
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}

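/*
 * Precompute the HMAC inner and outer partial hashes for the auth
 * setup block. The key is XORed with the ipad/opad constants, one
 * block of each is hashed, and the exported partial states are
 * byte-swapped into hash->sha.state1 in the layout the firmware
 * expects: inner state first, outer state at the next 8-byte aligned
 * offset.
 */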
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_aead_ctx *ctx,
                                  const u8 *auth_key,
                                  unsigned int auth_keylen)
{
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(ctx->ipad, 0, block_size);
        memset(ctx->opad, 0, block_size);
        shash->tfm = ctx->hash_tfm;

        if (auth_keylen > block_size) {
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, ctx->ipad);
                if (ret)
                        return ret;

                memcpy(ctx->opad, ctx->ipad, digest_size);
        } else {
                memcpy(ctx->ipad, auth_key, auth_keylen);
                memcpy(ctx->opad, auth_key, auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ctx->ipad + i;
                char *opad_ptr = ctx->opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ctx->ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &ctx->sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &ctx->sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &ctx->sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ctx->opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        if (offset < 0)
                return -EFAULT;

        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &ctx->sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &ctx->sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &ctx->sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
                break;
        default:
                return -EFAULT;
        }
        memzero_explicit(ctx->ipad, block_size);
        memzero_explicit(ctx->opad, block_size);
        return 0;
}

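/*
 * Initialize the header fields common to all lookaside (LA) requests:
 * SGL source/destination pointers, a 16-byte IV field, no partial
 * packets, no protocol-specific processing and no state updates.
 */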
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

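/*
 * Build the encrypt session: a cipher-then-hash content descriptor
 * (cipher config and key followed by the auth setup) plus the matching
 * firmware request template. For encryption the device appends the
 * digest to the output rather than comparing it.
 */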
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}

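/*
 * Build the decrypt session: hash-then-cipher, with the auth setup
 * placed first in the content descriptor and the cipher block after
 * two 8-byte aligned digest-sized state regions. For decryption the
 * device verifies the digest itself instead of returning it.
 */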
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}

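/*
 * Common skcipher content descriptor setup. AES-V2 capable devices use
 * the UCS cipher slice for XTS and CTR: for XTS both key halves are
 * stored in the CD (the second half feeds tweak derivation), while CTR
 * keys are padded to a 16-byte multiple.
 */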
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
                                      struct icp_qat_fw_la_bulk_req *req,
                                      struct icp_qat_hw_cipher_algo_blk *cd,
                                      const u8 *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
        bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
        int mode = ctx->mode;

        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;

        if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
                ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
                                             ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

                /* Store both XTS keys in the CD; only the first key is
                 * sent to the HW, the second is used for tweak
                 * calculation.
                 */
                memcpy(cd->ucs_aes.key, key, keylen);
                keylen = keylen / 2;
        } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
                ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
                                             ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
                keylen = round_up(keylen, 16);
                memcpy(cd->ucs_aes.key, key, keylen);
        } else {
                memcpy(cd->aes.key, key, keylen);
        }

        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
                                      int alg, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

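/*
 * Derive the equivalent AES decryption key from an encryption key by
 * expanding the key schedule and copying out the last round key(s).
 * The UCS slice does not perform this conversion in hardware, so for
 * XTS decryption on AES-V2 devices it is done here.
 */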
static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
                                    u8 *key_reverse)
{
        struct crypto_aes_ctx aes_expanded;
        int nrounds;
        u8 *key;

        aes_expandkey(&aes_expanded, key_forward, keylen);
        if (keylen == AES_KEYSIZE_128) {
                nrounds = 10;
                key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
                memcpy(key_reverse, key, AES_BLOCK_SIZE);
        } else {
                /* AES_KEYSIZE_256 */
                nrounds = 14;
                key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
                memcpy(key_reverse, key, AES_BLOCK_SIZE);
                memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
        }
}

static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
                                      int alg, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);

        qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

        if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
                /* Key reversing not supported, set no convert */
                dec_cd->aes.cipher_config.val =
                                QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);

                /* In-place key reversal */
                qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
                                        dec_cd->ucs_aes.key);
        } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
        } else {
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
        }
}

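/*
 * Map a key length onto a QAT cipher algorithm ID. XTS keys are twice
 * the AES key size, since they carry both the data key and the tweak
 * key.
 */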
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
                switch (key_len) {
                case AES_KEYSIZE_128:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_192:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                        break;
                case AES_KEYSIZE_256:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (key_len) {
                case AES_KEYSIZE_128 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_256 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;

        if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
                goto error;

        if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
                goto error;

        memzero_explicit(&keys, sizeof(keys));
        return 0;
bad_key:
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
error:
        memzero_explicit(&keys, sizeof(keys));
        return -EFAULT;
}

static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
                                          const u8 *key,
                                          unsigned int keylen,
                                          int mode)
{
        int alg;

        if (qat_alg_validate_key(keylen, &alg, mode))
                return -EINVAL;

        qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
        qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
        memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

        return qat_alg_aead_init_sessions(tfm, key, keylen,
                                          ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = NULL;
        int node = get_current_node();
        struct device *dev;
        int ret;

        inst = qat_crypto_get_instance_node(node);
        if (!inst)
                return -EINVAL;
        dev = &GET_DEV(inst->accel_dev);
        ctx->inst = inst;
        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
                                         &ctx->enc_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->enc_cd) {
                ret = -ENOMEM;
                goto out_free_inst;
        }
        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
                                         &ctx->dec_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->dec_cd) {
                ret = -ENOMEM;
                goto out_free_enc;
        }

        ret = qat_alg_aead_init_sessions(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CBC_MODE);
        if (ret)
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
out_free_inst:
        ctx->inst = NULL;
        qat_crypto_put_instance(inst);
        return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (ctx->enc_cd)
                return qat_alg_aead_rekey(tfm, key, keylen);
        else
                return qat_alg_aead_newkey(tfm, key, keylen);
}

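/*
 * Unmap and free everything qat_alg_sgl_to_bufl() set up; for
 * out-of-place requests the destination buffer list is torn down too.
 */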
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        size_t sz_out = qat_req->buf.sz_out;
        int i;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

        if (!qat_req->buf.sgl_src_valid)
                kfree(bl);

        if (blp != blpout) {
                /* Out-of-place operation: DMA unmap only the data buffers */
                int bufless = blout->num_bufs - blout->num_mapped_bufs;

                for (i = bufless; i < blout->num_bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

                if (!qat_req->buf.sgl_dst_valid)
                        kfree(blout);
        }
}

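/*
 * Translate the source and destination scatterlists into the flat
 * buffer lists the firmware consumes. Lists of up to QAT_MAX_BUFF_DESC
 * entries use descriptors preallocated in the request; longer lists
 * are allocated here. Every non-empty entry is DMA mapped, then the
 * list itself is mapped so its bus address can go into the request.
 */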
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req,
                               gfp_t flags)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, sg_nctr = 0;
        int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp = DMA_MAPPING_ERROR;
        dma_addr_t bloutp = DMA_MAPPING_ERROR;
        struct scatterlist *sg;
        size_t sz_out, sz = struct_size(bufl, bufers, n);
        int node = dev_to_node(&GET_DEV(inst->accel_dev));

        if (unlikely(!n))
                return -EINVAL;

        qat_req->buf.sgl_src_valid = false;
        qat_req->buf.sgl_dst_valid = false;

        if (n > QAT_MAX_BUFF_DESC) {
                bufl = kzalloc_node(sz, flags, node);
                if (unlikely(!bufl))
                        return -ENOMEM;
        } else {
                bufl = &qat_req->buf.sgl_src.sgl_hdr;
                memset(bufl, 0, sizeof(struct qat_alg_buf_list));
                qat_req->buf.sgl_src_valid = true;
        }

        for_each_sg(sgl, sg, n, i)
                bufl->bufers[i].addr = DMA_MAPPING_ERROR;

        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;

                if (!sg->length)
                        continue;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err_in;
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err_in;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                n = sg_nents(sglout);
                sz_out = struct_size(buflout, bufers, n);
                sg_nctr = 0;

                if (n > QAT_MAX_BUFF_DESC) {
                        buflout = kzalloc_node(sz_out, flags, node);
                        if (unlikely(!buflout))
                                goto err_in;
                } else {
                        buflout = &qat_req->buf.sgl_dst.sgl_hdr;
                        memset(buflout, 0, sizeof(struct qat_alg_buf_list));
                        qat_req->buf.sgl_dst_valid = true;
                }

                bufers = buflout->bufers;
                for_each_sg(sglout, sg, n, i)
                        bufers[i].addr = DMA_MAPPING_ERROR;

                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;

                        if (!sg->length)
                                continue;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err_out;
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err_out;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
                qat_req->buf.sz_out = 0;
        }
        return 0;

err_out:
        if (!dma_mapping_error(dev, bloutp))
                dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                        dma_unmap_single(dev, buflout->bufers[i].addr,
                                         buflout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!qat_req->buf.sgl_dst_valid)
                kfree(buflout);

err_in:
        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

        n = sg_nents(sgl);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!qat_req->buf.sgl_src_valid)
                kfree(bufl);

        dev_err(dev, "Failed to map buf for dma\n");
        return -ENOMEM;
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                  struct qat_crypto_request *qat_req)
{
        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->aead_req;
        u8 stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

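/*
 * Advance the CTR-mode IV: treat it as a 128-bit big-endian counter
 * and add the number of AES blocks processed, carrying from the low
 * into the high 64 bits.
 */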
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
        struct skcipher_request *sreq = qat_req->skcipher_req;
        u64 iv_lo_prev;
        u64 iv_lo;
        u64 iv_hi;

        memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);

        iv_lo = be64_to_cpu(qat_req->iv_lo);
        iv_hi = be64_to_cpu(qat_req->iv_hi);

        iv_lo_prev = iv_lo;
        iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
        if (iv_lo < iv_lo_prev)
                iv_hi++;

        qat_req->iv_lo = cpu_to_be64(iv_lo);
        qat_req->iv_hi = cpu_to_be64(iv_hi);
}

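/*
 * For CBC the next IV is the last ciphertext block: read it from the
 * destination when encrypting and from the source when decrypting.
 */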
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
        struct skcipher_request *sreq = qat_req->skcipher_req;
        int offset = sreq->cryptlen - AES_BLOCK_SIZE;
        struct scatterlist *sgl;

        if (qat_req->encryption)
                sgl = sreq->dst;
        else
                sgl = sreq->src;

        scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}

static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
        struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);

        switch (ctx->mode) {
        case ICP_QAT_HW_CIPHER_CTR_MODE:
                qat_alg_update_iv_ctr_mode(qat_req);
                break;
        case ICP_QAT_HW_CIPHER_CBC_MODE:
                qat_alg_update_iv_cbc_mode(qat_req);
                break;
        case ICP_QAT_HW_CIPHER_XTS_MODE:
                break;
        default:
                dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
                         ctx->mode);
        }
}

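/*
 * Skcipher completion handler. The chained IV for an encryption can
 * only be derived once the ciphertext exists, so it is updated here;
 * for decryption it was already saved before submission, as an
 * in-place operation would have overwritten the source by now.
 */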
static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                      struct qat_crypto_request *qat_req)
{
        struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct skcipher_request *sreq = qat_req->skcipher_req;
        u8 stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EINVAL;

        if (qat_req->encryption)
                qat_alg_update_iv(qat_req);

        memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);

        sreq->base.complete(&sreq->base, res);
}

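/*
 * Ring polling callback: recover the originating request from the
 * response's opaque field, run its completion handler, then resubmit
 * any backlogged requests for this instance.
 */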
void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                                (void *)(__force long)qat_resp->opaque_data;
        struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

        qat_req->cb(qat_resp, qat_req);

        qat_alg_send_backlog(backlog);
}

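/*
 * Fill the transport-level descriptor and queue the request on the
 * instance's symmetric tx ring. -ENOSPC means the request was not
 * queued and the caller must unwind its DMA mappings.
 */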
static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
                                    struct qat_crypto_instance *inst,
                                    struct crypto_async_request *base)
{
        struct qat_alg_req *alg_req = &qat_req->alg_req;

        alg_req->fw_req = (u32 *)&qat_req->req;
        alg_req->tx_ring = inst->sym_tx;
        alg_req->base = base;
        alg_req->backlog = &inst->backlog;

        return qat_alg_send_message(alg_req);
}

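/*
 * AEAD decrypt: the ciphertext (cryptlen minus the digest) must be
 * block aligned. Authentication covers the associated data plus the
 * ciphertext; decryption starts right after the associated data.
 */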
static int qat_alg_aead_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_authsize(aead_tfm);
        gfp_t f = qat_algs_alloc_flags(&areq->base);
        int ret;
        u32 cipher_len;

        cipher_len = areq->cryptlen - digest_size;
        if (cipher_len % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = cipher_len;
        cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;

        ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
        if (ret == -ENOSPC)
                qat_alg_free_bufl(ctx->inst, qat_req);

        return ret;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        gfp_t f = qat_algs_alloc_flags(&areq->base);
        struct icp_qat_fw_la_bulk_req *msg;
        u8 *iv = areq->iv;
        int ret;

        if (areq->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

        memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
        cipher_param->cipher_length = areq->cryptlen;
        cipher_param->cipher_offset = areq->assoclen;

        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen;

        ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
        if (ret == -ENOSPC)
                qat_alg_free_bufl(ctx->inst, qat_req);

        return ret;
}

static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
                                  const u8 *key, unsigned int keylen,
                                  int mode)
{
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
        memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

        return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
                                   const u8 *key, unsigned int keylen,
                                   int mode)
{
        struct qat_crypto_instance *inst = NULL;
        struct device *dev;
        int node = get_current_node();
        int ret;

        inst = qat_crypto_get_instance_node(node);
        if (!inst)
                return -EINVAL;
        dev = &GET_DEV(inst->accel_dev);
        ctx->inst = inst;
        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
                                         &ctx->enc_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->enc_cd) {
                ret = -ENOMEM;
                goto out_free_instance;
        }
        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
                                         &ctx->dec_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->dec_cd) {
                ret = -ENOMEM;
                goto out_free_enc;
        }

        ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
        if (ret)
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
out_free_instance:
        ctx->inst = NULL;
        qat_crypto_put_instance(inst);
        return ret;
}

static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen,
                                   int mode)
{
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->mode = mode;

        if (ctx->enc_cd)
                return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
        else
                return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, unsigned int keylen)
{
        return qat_alg_skcipher_setkey(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, unsigned int keylen)
{
        return qat_alg_skcipher_setkey(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CTR_MODE);
}

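/*
 * XTS setkey: 192-bit keys are not supported by the device, so those
 * requests are steered to the software fallback tfm. On AES-V2 capable
 * devices the second key half also programs the tweak cipher used by
 * qat_alg_set_req_iv().
 */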
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, unsigned int keylen)
{
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;

        if (keylen >> 1 == AES_KEYSIZE_192) {
                ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
                if (ret)
                        return ret;

                ctx->fallback = true;

                return 0;
        }

        ctx->fallback = false;

        ret = qat_alg_skcipher_setkey(tfm, key, keylen,
                                      ICP_QAT_HW_CIPHER_XTS_MODE);
        if (ret)
                return ret;

        if (HW_CAP_AES_V2(ctx->inst->accel_dev))
                ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
                                           keylen / 2);

        return ret;
}

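/*
 * Load the IV into the request. For XTS on AES-V2 devices the UCS
 * slice expects the tweak already encrypted, so it is computed here
 * with the software tweak cipher.
 */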
static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
        bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
        u8 *iv = qat_req->skcipher_req->iv;

        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;

        if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
                crypto_cipher_encrypt_one(ctx->tweak,
                                          (u8 *)cipher_param->u.cipher_IV_array,
                                          iv);
        else
                memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
}

static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
        struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        gfp_t f = qat_algs_alloc_flags(&req->base);
        struct icp_qat_fw_la_bulk_req *msg;
        int ret;

        if (req->cryptlen == 0)
                return 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->skcipher_ctx = ctx;
        qat_req->skcipher_req = req;
        qat_req->cb = qat_skcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        qat_req->encryption = true;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->cryptlen;
        cipher_param->cipher_offset = 0;

        qat_alg_set_req_iv(qat_req);

        ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
        if (ret == -ENOSPC)
                qat_alg_free_bufl(ctx->inst, qat_req);

        return ret;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
        if (req->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
        struct skcipher_request *nreq = skcipher_request_ctx(req);

        if (req->cryptlen < XTS_BLOCK_SIZE)
                return -EINVAL;

        if (ctx->fallback) {
                memcpy(nreq, req, sizeof(*req));
                skcipher_request_set_tfm(nreq, ctx->ftfm);
                return crypto_skcipher_encrypt(nreq);
        }

        return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
        struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        gfp_t f = qat_algs_alloc_flags(&req->base);
        struct icp_qat_fw_la_bulk_req *msg;
        int ret;

        if (req->cryptlen == 0)
                return 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->skcipher_ctx = ctx;
        qat_req->skcipher_req = req;
        qat_req->cb = qat_skcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        qat_req->encryption = false;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->cryptlen;
        cipher_param->cipher_offset = 0;

        qat_alg_set_req_iv(qat_req);
        /* Save the next chained IV now; the source may be clobbered later */
        qat_alg_update_iv(qat_req);

        ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
        if (ret == -ENOSPC)
                qat_alg_free_bufl(ctx->inst, qat_req);

        return ret;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
        if (req->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
        struct skcipher_request *nreq = skcipher_request_ctx(req);

        if (req->cryptlen < XTS_BLOCK_SIZE)
                return -EINVAL;

        if (ctx->fallback) {
                memcpy(nreq, req, sizeof(*req));
                skcipher_request_set_tfm(nreq, ctx->ftfm);
                return crypto_skcipher_decrypt(nreq);
        }

        return qat_alg_skcipher_decrypt(req);
}

1325static int qat_alg_aead_init(struct crypto_aead *tfm,
1326                             enum icp_qat_hw_auth_algo hash,
1327                             const char *hash_name)
1328{
1329        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1330
1331        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1332        if (IS_ERR(ctx->hash_tfm))
1333                return PTR_ERR(ctx->hash_tfm);
1334        ctx->qat_hash_alg = hash;
1335        crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1336        return 0;
1337}
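/*
 * The software shash allocated here is not used on the data path; it
 * is only needed at setkey time to precompute the HMAC ipad/opad
 * partial-hash state that gets embedded in the content descriptor.
 */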
1338
1339static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1340{
1341        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1342}
1343
1344static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1345{
1346        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1347}
1348
1349static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1350{
1351        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1352}
1353
1354static void qat_alg_aead_exit(struct crypto_aead *tfm)
1355{
1356        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1357        struct qat_crypto_instance *inst = ctx->inst;
1358        struct device *dev;
1359
1360        crypto_free_shash(ctx->hash_tfm);
1361
1362        if (!inst)
1363                return;
1364
1365        dev = &GET_DEV(inst->accel_dev);
1366        if (ctx->enc_cd) {
1367                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1368                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1369                                  ctx->enc_cd, ctx->enc_cd_paddr);
1370        }
1371        if (ctx->dec_cd) {
1372                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1373                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1374                                  ctx->dec_cd, ctx->dec_cd_paddr);
1375        }
1376        qat_crypto_put_instance(inst);
1377}
1378
1379static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1380{
1381        crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1382        return 0;
1383}
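/*
 * Plain CBC/CTR transforms only ever need the hardware request
 * context, so the per-request area is just a struct qat_crypto_request.
 */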
1384
1385static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1386{
1387        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1388        int reqsize;
1389
1390        ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1391                                          CRYPTO_ALG_NEED_FALLBACK);
1392        if (IS_ERR(ctx->ftfm))
1393                return PTR_ERR(ctx->ftfm);
1394
1395        ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
1396        if (IS_ERR(ctx->tweak)) {
1397                crypto_free_skcipher(ctx->ftfm);
1398                return PTR_ERR(ctx->tweak);
1399        }
1400
1401        reqsize = max(sizeof(struct qat_crypto_request),
1402                      sizeof(struct skcipher_request) +
1403                      crypto_skcipher_reqsize(ctx->ftfm));
1404        crypto_skcipher_set_reqsize(tfm, reqsize);
1405
1406        return 0;
1407}
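/*
 * The request context is sized for two mutually exclusive uses: a
 * struct qat_crypto_request when the request is handled in hardware,
 * or a struct skcipher_request (plus the fallback cipher's own request
 * size) when it is forwarded to ctx->ftfm. Reserving the larger of the
 * two lets a single per-request allocation serve either path.
 */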
1408
1409static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1410{
1411        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1412        struct qat_crypto_instance *inst = ctx->inst;
1413        struct device *dev;
1414
1415        if (!inst)
1416                return;
1417
1418        dev = &GET_DEV(inst->accel_dev);
1419        if (ctx->enc_cd) {
1420                memset(ctx->enc_cd, 0,
1421                       sizeof(struct icp_qat_hw_cipher_algo_blk));
1422                dma_free_coherent(dev,
1423                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
1424                                  ctx->enc_cd, ctx->enc_cd_paddr);
1425        }
1426        if (ctx->dec_cd) {
1427                memset(ctx->dec_cd, 0,
1428                       sizeof(struct icp_qat_hw_cipher_algo_blk));
1429                dma_free_coherent(dev,
1430                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
1431                                  ctx->dec_cd, ctx->dec_cd_paddr);
1432        }
1433        qat_crypto_put_instance(inst);
1434}
1435
1436static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1437{
1438        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1439
1440        if (ctx->ftfm)
1441                crypto_free_skcipher(ctx->ftfm);
1442
1443        if (ctx->tweak)
1444                crypto_free_cipher(ctx->tweak);
1445
1446        qat_alg_skcipher_exit_tfm(tfm);
1447}
1448
1449static struct aead_alg qat_aeads[] = { {
1450        .base = {
1451                .cra_name = "authenc(hmac(sha1),cbc(aes))",
1452                .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1453                .cra_priority = 4001,
1454                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1455                .cra_blocksize = AES_BLOCK_SIZE,
1456                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1457                .cra_module = THIS_MODULE,
1458        },
1459        .init = qat_alg_aead_sha1_init,
1460        .exit = qat_alg_aead_exit,
1461        .setkey = qat_alg_aead_setkey,
1462        .decrypt = qat_alg_aead_dec,
1463        .encrypt = qat_alg_aead_enc,
1464        .ivsize = AES_BLOCK_SIZE,
1465        .maxauthsize = SHA1_DIGEST_SIZE,
1466}, {
1467        .base = {
1468                .cra_name = "authenc(hmac(sha256),cbc(aes))",
1469                .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1470                .cra_priority = 4001,
1471                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1472                .cra_blocksize = AES_BLOCK_SIZE,
1473                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1474                .cra_module = THIS_MODULE,
1475        },
1476        .init = qat_alg_aead_sha256_init,
1477        .exit = qat_alg_aead_exit,
1478        .setkey = qat_alg_aead_setkey,
1479        .decrypt = qat_alg_aead_dec,
1480        .encrypt = qat_alg_aead_enc,
1481        .ivsize = AES_BLOCK_SIZE,
1482        .maxauthsize = SHA256_DIGEST_SIZE,
1483}, {
1484        .base = {
1485                .cra_name = "authenc(hmac(sha512),cbc(aes))",
1486                .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1487                .cra_priority = 4001,
1488                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1489                .cra_blocksize = AES_BLOCK_SIZE,
1490                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1491                .cra_module = THIS_MODULE,
1492        },
1493        .init = qat_alg_aead_sha512_init,
1494        .exit = qat_alg_aead_exit,
1495        .setkey = qat_alg_aead_setkey,
1496        .decrypt = qat_alg_aead_dec,
1497        .encrypt = qat_alg_aead_enc,
1498        .ivsize = AES_BLOCK_SIZE,
1499        .maxauthsize = SHA512_DIGEST_SIZE,
1500} };
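/*
 * These AEADs are reached through the generic crypto API. A minimal,
 * illustrative sketch of a client (error handling elided; "keyblob" is
 * a hypothetical buffer in the authenc() RTA key format described in
 * include/crypto/authenc.h):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, keyblob, keyblob_len);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 *
 * With a cra_priority of 4001 these entries win over the software
 * authenc() composition whenever a QAT device is present.
 */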
1501
1502static struct skcipher_alg qat_skciphers[] = { {
1503        .base.cra_name = "cbc(aes)",
1504        .base.cra_driver_name = "qat_aes_cbc",
1505        .base.cra_priority = 4001,
1506        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1507        .base.cra_blocksize = AES_BLOCK_SIZE,
1508        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1509        .base.cra_alignmask = 0,
1510        .base.cra_module = THIS_MODULE,
1511
1512        .init = qat_alg_skcipher_init_tfm,
1513        .exit = qat_alg_skcipher_exit_tfm,
1514        .setkey = qat_alg_skcipher_cbc_setkey,
1515        .decrypt = qat_alg_skcipher_blk_decrypt,
1516        .encrypt = qat_alg_skcipher_blk_encrypt,
1517        .min_keysize = AES_MIN_KEY_SIZE,
1518        .max_keysize = AES_MAX_KEY_SIZE,
1519        .ivsize = AES_BLOCK_SIZE,
1520}, {
1521        .base.cra_name = "ctr(aes)",
1522        .base.cra_driver_name = "qat_aes_ctr",
1523        .base.cra_priority = 4001,
1524        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1525        .base.cra_blocksize = 1,
1526        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1527        .base.cra_alignmask = 0,
1528        .base.cra_module = THIS_MODULE,
1529
1530        .init = qat_alg_skcipher_init_tfm,
1531        .exit = qat_alg_skcipher_exit_tfm,
1532        .setkey = qat_alg_skcipher_ctr_setkey,
1533        .decrypt = qat_alg_skcipher_decrypt,
1534        .encrypt = qat_alg_skcipher_encrypt,
1535        .min_keysize = AES_MIN_KEY_SIZE,
1536        .max_keysize = AES_MAX_KEY_SIZE,
1537        .ivsize = AES_BLOCK_SIZE,
1538}, {
1539        .base.cra_name = "xts(aes)",
1540        .base.cra_driver_name = "qat_aes_xts",
1541        .base.cra_priority = 4001,
1542        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1543                          CRYPTO_ALG_ALLOCATES_MEMORY,
1544        .base.cra_blocksize = AES_BLOCK_SIZE,
1545        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1546        .base.cra_alignmask = 0,
1547        .base.cra_module = THIS_MODULE,
1548
1549        .init = qat_alg_skcipher_init_xts_tfm,
1550        .exit = qat_alg_skcipher_exit_xts_tfm,
1551        .setkey = qat_alg_skcipher_xts_setkey,
1552        .decrypt = qat_alg_skcipher_xts_decrypt,
1553        .encrypt = qat_alg_skcipher_xts_encrypt,
1554        .min_keysize = 2 * AES_MIN_KEY_SIZE,
1555        .max_keysize = 2 * AES_MAX_KEY_SIZE,
1556        .ivsize = AES_BLOCK_SIZE,
1557} };
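/*
 * A hedged usage sketch for the symmetric ciphers, wrapping the async
 * API synchronously; identifiers such as "key", "sg" and "iv" are
 * assumed to be set up by the caller (error handling elided):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, sg, sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 *
 * Note that len must be a multiple of AES_BLOCK_SIZE for "cbc(aes)"
 * (see qat_alg_skcipher_blk_encrypt() above).
 */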
1558
1559int qat_algs_register(void)
1560{
1561        int ret = 0;
1562
1563        mutex_lock(&algs_lock);
1564        if (++active_devs != 1)
1565                goto unlock;
1566
1567        ret = crypto_register_skciphers(qat_skciphers,
1568                                        ARRAY_SIZE(qat_skciphers));
1569        if (ret)
1570                goto unlock;
1571
1572        ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1573        if (ret)
1574                goto unreg_algs;
1575
1576unlock:
1577        mutex_unlock(&algs_lock);
1578        return ret;
1579
1580unreg_algs:
1581        crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1582        goto unlock;
1583}
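/*
 * active_devs is a reference count protected by algs_lock: the
 * algorithm arrays above are registered once, when the first QAT
 * device probes, and stay registered until the last device is removed
 * and qat_algs_unregister() drops the count back to zero.
 */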
1584
1585void qat_algs_unregister(void)
1586{
1587        mutex_lock(&algs_lock);
1588        if (--active_devs != 0)
1589                goto unlock;
1590
1591        crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1592        crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1593
1594unlock:
1595        mutex_unlock(&algs_lock);
1596}
1597