linux/drivers/crypto/qat/qat_common/qat_algs.c
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

#define HW_CAP_AES_V2(accel_dev) \
        (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
         ICP_ACCEL_CAPABILITIES_AES_V2)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
        u32 len;
        u32 resrvd;
        u64 addr;
} __packed;

struct qat_alg_buf_list {
        u64 resrvd;
        u32 num_bufs;
        u32 num_mapped_bufs;
        struct qat_alg_buf buffers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

struct qat_alg_aead_ctx {
        struct qat_alg_cd *enc_cd;
        struct qat_alg_cd *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
        union {
                struct sha1_state sha1;
                struct sha256_state sha256;
                struct sha512_state sha512;
        };
        char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
        char opad[SHA512_BLOCK_SIZE];
};

struct qat_alg_skcipher_ctx {
        struct icp_qat_hw_cipher_algo_blk *enc_cd;
        struct icp_qat_hw_cipher_algo_blk *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct qat_crypto_instance *inst;
        struct crypto_skcipher *ftfm;
        struct crypto_cipher *tweak;
        bool fallback;
        int mode;
};

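/*
 * Return the intermediate state (state1) size in bytes for the given QAT
 * hash algorithm, or -EFAULT for an unsupported algorithm.
 */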
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}

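/*
 * Precompute the HMAC inner and outer partial digests for the given auth
 * key, per RFC 2104: hash the key XORed with the ipad/opad constants and
 * export the partial states into the hardware auth setup block. The inner
 * state is placed at the start of state1 and the outer state follows it at
 * an 8-byte-aligned offset.
 */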
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_aead_ctx *ctx,
                                  const u8 *auth_key,
                                  unsigned int auth_keylen)
{
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(ctx->ipad, 0, block_size);
        memset(ctx->opad, 0, block_size);
        shash->tfm = ctx->hash_tfm;

        if (auth_keylen > block_size) {
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, ctx->ipad);
                if (ret)
                        return ret;

                memcpy(ctx->opad, ctx->ipad, digest_size);
        } else {
                memcpy(ctx->ipad, auth_key, auth_keylen);
                memcpy(ctx->opad, auth_key, auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ctx->ipad + i;
                char *opad_ptr = ctx->opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ctx->ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &ctx->sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &ctx->sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &ctx->sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ctx->opad, block_size))
                return -EFAULT;

        offset = qat_get_inter_state_size(ctx->qat_hash_alg);
        if (offset < 0)
                return -EFAULT;
        offset = round_up(offset, 8);

        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &ctx->sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &ctx->sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &ctx->sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
                break;
        default:
                return -EFAULT;
        }
        memzero_explicit(ctx->ipad, block_size);
        memzero_explicit(ctx->opad, block_size);
        return 0;
}

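/* Fill in the request header fields common to all look-aside (LA) requests */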
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

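/*
 * Build the encrypt session template. The content descriptor carries the
 * cipher config and key followed by the hash setup block, and the firmware
 * request chains the CIPHER slice into the AUTH slice, i.e. encrypt-then-MAC
 * as required by authenc().
 */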
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}

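/*
 * Build the decrypt session template. Here the content descriptor carries
 * the hash setup block first and the cipher config after it; the AUTH slice
 * runs before the CIPHER slice and the hardware verifies the digest itself
 * (ICP_QAT_FW_LA_CMP_AUTH_RES).
 */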
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}

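/*
 * Common skcipher content descriptor and request template setup. On AES-v2
 * capable devices the UCS slice is selected for XTS and CTR modes; for XTS
 * the full double-length key is kept in the CD although only the first half
 * is used by the cipher slice, the second half being the tweak key.
 */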
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
                                      struct icp_qat_fw_la_bulk_req *req,
                                      struct icp_qat_hw_cipher_algo_blk *cd,
                                      const u8 *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
        bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
        int mode = ctx->mode;

        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;

        if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
                ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
                                             ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

                /*
                 * Store both XTS keys in the CD; only the first key is sent
                 * to the hardware, the second key is used for tweak
                 * calculation.
                 */
                memcpy(cd->ucs_aes.key, key, keylen);
                keylen = keylen / 2;
        } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
                ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
                                             ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
                keylen = round_up(keylen, 16);
                memcpy(cd->ucs_aes.key, key, keylen);
        } else {
                memcpy(cd->aes.key, key, keylen);
        }

        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
                                      int alg, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

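/*
 * The UCS cipher slice cannot convert an encryption key schedule into a
 * decryption one in hardware, so expand the AES key schedule in software
 * and extract the final round key(s), from which the decryption rounds can
 * be derived in reverse.
 */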
static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
                                    u8 *key_reverse)
{
        struct crypto_aes_ctx aes_expanded;
        int nrounds;
        u8 *key;

        aes_expandkey(&aes_expanded, key_forward, keylen);
        if (keylen == AES_KEYSIZE_128) {
                nrounds = 10;
                key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
                memcpy(key_reverse, key, AES_BLOCK_SIZE);
        } else {
                /* AES_KEYSIZE_256 */
                nrounds = 14;
                key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
                memcpy(key_reverse, key, AES_BLOCK_SIZE);
                memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
        }
}

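/*
 * Select the decrypt cipher config: AES-v2 XTS uses the software-reversed
 * key with no hardware key conversion, CTR reuses the encrypt config (CTR
 * decryption is encryption), and the remaining modes rely on hardware key
 * conversion.
 */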
static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
                                      int alg, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);

        qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

        if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
                /* Key reversing not supported, set no convert */
                dec_cd->aes.cipher_config.val =
                                QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);

                /* In-place key reversal */
                qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
                                        dec_cd->ucs_aes.key);
        } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
        } else {
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
        }
}

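/*
 * Map the key length to the QAT AES algorithm id. XTS keys carry both the
 * data key and the tweak key, hence twice the nominal AES key size.
 */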
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
                switch (key_len) {
                case AES_KEYSIZE_128:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_192:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                        break;
                case AES_KEYSIZE_256:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (key_len) {
                case AES_KEYSIZE_128 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_256 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

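/*
 * Split the authenc() key blob into its authentication and encryption keys
 * and program both the encrypt and decrypt session templates.
 */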
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;

        if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
                goto error;

        if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
                goto error;

        memzero_explicit(&keys, sizeof(keys));
        return 0;
bad_key:
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
error:
        memzero_explicit(&keys, sizeof(keys));
        return -EFAULT;
}

static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
                                          const u8 *key,
                                          unsigned int keylen,
                                          int mode)
{
        int alg;

        if (qat_alg_validate_key(keylen, &alg, mode))
                return -EINVAL;

        qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
        qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
        memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

        return qat_alg_aead_init_sessions(tfm, key, keylen,
                                          ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = NULL;
        int node = get_current_node();
        struct device *dev;
        int ret;

        inst = qat_crypto_get_instance_node(node);
        if (!inst)
                return -EINVAL;
        dev = &GET_DEV(inst->accel_dev);
        ctx->inst = inst;
        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
                                         &ctx->enc_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->enc_cd) {
                ret = -ENOMEM;
                goto out_free_inst;
        }
        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
                                         &ctx->dec_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->dec_cd) {
                ret = -ENOMEM;
                goto out_free_enc;
        }

        ret = qat_alg_aead_init_sessions(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CBC_MODE);
        if (ret)
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
out_free_inst:
        ctx->inst = NULL;
        qat_crypto_put_instance(inst);
        return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (ctx->enc_cd)
                return qat_alg_aead_rekey(tfm, key, keylen);
        else
                return qat_alg_aead_newkey(tfm, key, keylen);
}

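/*
 * Unmap and free the source and, for out-of-place requests, destination
 * buffer lists built by qat_alg_sgl_to_bufl().
 */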
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        size_t sz_out = qat_req->buf.sz_out;
        int i;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->buffers[i].addr,
                                 bl->buffers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* For out-of-place operations unmap only the mapped data */
                int bufless = blout->num_bufs - blout->num_mapped_bufs;

                for (i = bufless; i < blout->num_bufs; i++) {
                        dma_unmap_single(dev, blout->buffers[i].addr,
                                         blout->buffers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
                kfree(blout);
        }
}

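/*
 * Flatten the source and destination scatterlists into the firmware SGL
 * format (struct qat_alg_buf_list): DMA-map every non-empty segment as well
 * as the list itself. For in-place requests the destination list simply
 * aliases the source list.
 */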
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, sg_nctr = 0;
        int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = struct_size(bufl, buffers, n + 1);

        if (unlikely(!n))
                return -EINVAL;

        bufl = kzalloc_node(sz, GFP_ATOMIC,
                            dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err_in;

        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;

                if (!sg->length)
                        continue;

                bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                       sg->length,
                                                       DMA_BIDIRECTIONAL);
                bufl->buffers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
                        goto err_in;
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *buffers;

                n = sg_nents(sglout);
                sz_out = struct_size(buflout, buffers, n + 1);
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err_in;
                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err_out;
                buffers = buflout->buffers;
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;

                        if (!sg->length)
                                continue;

                        buffers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                         sg->length,
                                                         DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
                                goto err_out;
                        buffers[y].len = sg->length;
                        sg_nctr++;
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
                qat_req->buf.sz_out = 0;
        }
        return 0;

err_out:
        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->buffers[i].addr))
                        dma_unmap_single(dev, buflout->buffers[i].addr,
                                         buflout->buffers[i].len,
                                         DMA_BIDIRECTIONAL);
        if (!dma_mapping_error(dev, bloutp))
                dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
        kfree(buflout);

err_in:
        n = sg_nents(sgl);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->buffers[i].addr))
                        dma_unmap_single(dev, bufl->buffers[i].addr,
                                         bufl->buffers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);

        dev_err(dev, "Failed to map buf for dma\n");
        return -ENOMEM;
}

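/* Completion handler for AEAD requests, invoked via qat_alg_callback() */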
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                  struct qat_crypto_request *qat_req)
{
        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->aead_req;
        u8 stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

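/*
 * Advance the CTR-mode IV by the number of AES blocks processed, treating
 * the IV as a 128-bit big-endian counter with carry from the low into the
 * high 64 bits.
 */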
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
        struct skcipher_request *sreq = qat_req->skcipher_req;
        u64 iv_lo_prev;
        u64 iv_lo;
        u64 iv_hi;

        memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);

        iv_lo = be64_to_cpu(qat_req->iv_lo);
        iv_hi = be64_to_cpu(qat_req->iv_hi);

        iv_lo_prev = iv_lo;
        iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
        if (iv_lo < iv_lo_prev)
                iv_hi++;

        qat_req->iv_lo = cpu_to_be64(iv_lo);
        qat_req->iv_hi = cpu_to_be64(iv_hi);
}

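/*
 * For CBC the IV of the next request is the last ciphertext block: the tail
 * of dst on encryption, the tail of src on decryption.
 */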
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
        struct skcipher_request *sreq = qat_req->skcipher_req;
        int offset = sreq->cryptlen - AES_BLOCK_SIZE;
        struct scatterlist *sgl;

        if (qat_req->encryption)
                sgl = sreq->dst;
        else
                sgl = sreq->src;

        scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}

static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
        struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);

        switch (ctx->mode) {
        case ICP_QAT_HW_CIPHER_CTR_MODE:
                qat_alg_update_iv_ctr_mode(qat_req);
                break;
        case ICP_QAT_HW_CIPHER_CBC_MODE:
                qat_alg_update_iv_cbc_mode(qat_req);
                break;
        case ICP_QAT_HW_CIPHER_XTS_MODE:
                break;
        default:
                dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
                         ctx->mode);
        }
}

static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                      struct qat_crypto_request *qat_req)
{
        struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct skcipher_request *sreq = qat_req->skcipher_req;
        u8 stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EINVAL;

        if (qat_req->encryption)
                qat_alg_update_iv(qat_req);

        memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);

        sreq->base.complete(&sreq->base, res);
}

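/*
 * Ring callback: the firmware echoes the request pointer back in the
 * response's opaque data field, which is used here to dispatch the response
 * to the right per-request completion handler.
 */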
void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                                (void *)(__force long)qat_resp->opaque_data;

        qat_req->cb(qat_resp, qat_req);
}

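/*
 * AEAD decrypt: cryptlen covers the ciphertext plus the appended digest
 * (ICV), so the cipher length excludes the digest and must be a whole
 * number of AES blocks.
 */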
static int qat_alg_aead_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;
        u32 cipher_len;

        cipher_len = areq->cryptlen - digest_size;
        if (cipher_len % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = cipher_len;
        cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        u8 *iv = areq->iv;
        int ret, ctr = 0;

        if (areq->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

        memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
        cipher_param->cipher_length = areq->cryptlen;
        cipher_param->cipher_offset = areq->assoclen;

        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
                                  const u8 *key, unsigned int keylen,
                                  int mode)
{
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
        memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

        return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
                                   const u8 *key, unsigned int keylen,
                                   int mode)
{
        struct qat_crypto_instance *inst = NULL;
        struct device *dev;
        int node = get_current_node();
        int ret;

        inst = qat_crypto_get_instance_node(node);
        if (!inst)
                return -EINVAL;
        dev = &GET_DEV(inst->accel_dev);
        ctx->inst = inst;
        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
                                         &ctx->enc_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->enc_cd) {
                ret = -ENOMEM;
                goto out_free_instance;
        }
        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
                                         &ctx->dec_cd_paddr,
                                         GFP_ATOMIC);
        if (!ctx->dec_cd) {
                ret = -ENOMEM;
                goto out_free_enc;
        }

        ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
        if (ret)
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
out_free_instance:
        ctx->inst = NULL;
        qat_crypto_put_instance(inst);
        return ret;
}

static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen,
                                   int mode)
{
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->mode = mode;

        if (ctx->enc_cd)
                return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
        else
                return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, unsigned int keylen)
{
        return qat_alg_skcipher_setkey(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, unsigned int keylen)
{
        return qat_alg_skcipher_setkey(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CTR_MODE);
}

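/*
 * XTS setkey: the hardware only handles AES-128 and AES-256 XTS keys, so
 * 192-bit keys are routed to the software fallback tfm. On AES-v2 devices
 * the tweak key (the second half of the blob) is also programmed into a
 * software AES cipher used to precompute the initial tweak.
 */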
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, unsigned int keylen)
{
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;

        if (keylen >> 1 == AES_KEYSIZE_192) {
                ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
                if (ret)
                        return ret;

                ctx->fallback = true;

                return 0;
        }

        ctx->fallback = false;

        ret = qat_alg_skcipher_setkey(tfm, key, keylen,
                                      ICP_QAT_HW_CIPHER_XTS_MODE);
        if (ret)
                return ret;

        if (HW_CAP_AES_V2(ctx->inst->accel_dev))
                ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
                                           keylen / 2);

        return ret;
}

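/*
 * Program the request IV. For XTS on AES-v2 (UCS slice) the initial tweak
 * is computed in software by encrypting the IV with the tweak key; in all
 * other cases the IV is passed through unchanged.
 */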
static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
        bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
        u8 *iv = qat_req->skcipher_req->iv;

        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;

        if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
                crypto_cipher_encrypt_one(ctx->tweak,
                                          (u8 *)cipher_param->u.cipher_IV_array,
                                          iv);
        else
                memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
}

static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
        struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        if (req->cryptlen == 0)
                return 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->skcipher_ctx = ctx;
        qat_req->skcipher_req = req;
        qat_req->cb = qat_skcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        qat_req->encryption = true;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->cryptlen;
        cipher_param->cipher_offset = 0;

        qat_alg_set_req_iv(qat_req);

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
        if (req->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
        struct skcipher_request *nreq = skcipher_request_ctx(req);

        if (req->cryptlen < XTS_BLOCK_SIZE)
                return -EINVAL;

        if (ctx->fallback) {
                memcpy(nreq, req, sizeof(*req));
                skcipher_request_set_tfm(nreq, ctx->ftfm);
                return crypto_skcipher_encrypt(nreq);
        }

        return qat_alg_skcipher_encrypt(req);
}

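/*
 * For decryption the IV of a following request is derived before this
 * request is sent (see the qat_alg_update_iv() call below), since an
 * in-place operation would overwrite the last ciphertext block needed for
 * the CBC IV update.
 */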
static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
        struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        if (req->cryptlen == 0)
                return 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->skcipher_ctx = ctx;
        qat_req->skcipher_req = req;
        qat_req->cb = qat_skcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        qat_req->encryption = false;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->cryptlen;
        cipher_param->cipher_offset = 0;

        qat_alg_set_req_iv(qat_req);
        qat_alg_update_iv(qat_req);

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
        if (req->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
        struct skcipher_request *nreq = skcipher_request_ctx(req);

        if (req->cryptlen < XTS_BLOCK_SIZE)
                return -EINVAL;

        if (ctx->fallback) {
                memcpy(nreq, req, sizeof(*req));
                skcipher_request_set_tfm(nreq, ctx->ftfm);
                return crypto_skcipher_decrypt(nreq);
        }

        return qat_alg_skcipher_decrypt(req);
}

1299static int qat_alg_aead_init(struct crypto_aead *tfm,
1300                             enum icp_qat_hw_auth_algo hash,
1301                             const char *hash_name)
1302{
1303        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1304
1305        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1306        if (IS_ERR(ctx->hash_tfm))
1307                return PTR_ERR(ctx->hash_tfm);
1308        ctx->qat_hash_alg = hash;
1309        crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1310        return 0;
1311}
1312
1313static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1314{
1315        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1316}
1317
1318static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1319{
1320        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1321}
1322
1323static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1324{
1325        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1326}
1327
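    /*
     * Zero both content descriptors before freeing them so that no key
     * material is left behind in DMA-coherent memory, then release the
     * crypto instance.
     */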
1328static void qat_alg_aead_exit(struct crypto_aead *tfm)
1329{
1330        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1331        struct qat_crypto_instance *inst = ctx->inst;
1332        struct device *dev;
1333
1334        crypto_free_shash(ctx->hash_tfm);
1335
1336        if (!inst)
1337                return;
1338
1339        dev = &GET_DEV(inst->accel_dev);
1340        if (ctx->enc_cd) {
1341                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1342                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1343                                  ctx->enc_cd, ctx->enc_cd_paddr);
1344        }
1345        if (ctx->dec_cd) {
1346                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1347                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1348                                  ctx->dec_cd, ctx->dec_cd_paddr);
1349        }
1350        qat_crypto_put_instance(inst);
1351}
1352
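    /* non-XTS tfms only need the QAT request as per-request context */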
1353static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1354{
1355        crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1356        return 0;
1357}
1358
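    /*
     * XTS init: allocate the software fallback skcipher plus a raw AES
     * cipher used to encrypt the initial tweak on AES-v2 capable devices,
     * and size the request context to fit either a QAT request or a
     * fallback request together with its own context.
     */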
1359static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1360{
1361        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1362        int reqsize;
1363
1364        ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1365                                          CRYPTO_ALG_NEED_FALLBACK);
1366        if (IS_ERR(ctx->ftfm))
1367                return PTR_ERR(ctx->ftfm);
1368
1369        ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
1370        if (IS_ERR(ctx->tweak)) {
1371                crypto_free_skcipher(ctx->ftfm);
1372                return PTR_ERR(ctx->tweak);
1373        }
1374
1375        reqsize = max(sizeof(struct qat_crypto_request),
1376                      sizeof(struct skcipher_request) +
1377                      crypto_skcipher_reqsize(ctx->ftfm));
1378        crypto_skcipher_set_reqsize(tfm, reqsize);
1379
1380        return 0;
1381}
1382
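    /* zeroize and free both content descriptors, then drop the instance */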
1383static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1384{
1385        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1386        struct qat_crypto_instance *inst = ctx->inst;
1387        struct device *dev;
1388
1389        if (!inst)
1390                return;
1391
1392        dev = &GET_DEV(inst->accel_dev);
1393        if (ctx->enc_cd) {
1394                memset(ctx->enc_cd, 0,
1395                       sizeof(struct icp_qat_hw_cipher_algo_blk));
1396                dma_free_coherent(dev,
1397                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
1398                                  ctx->enc_cd, ctx->enc_cd_paddr);
1399        }
1400        if (ctx->dec_cd) {
1401                memset(ctx->dec_cd, 0,
1402                       sizeof(struct icp_qat_hw_cipher_algo_blk));
1403                dma_free_coherent(dev,
1404                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
1405                                  ctx->dec_cd, ctx->dec_cd_paddr);
1406        }
1407        qat_crypto_put_instance(inst);
1408}
1409
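    /* free the XTS-only tfms before the common skcipher teardown */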
1410static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1411{
1412        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1413
1414        if (ctx->ftfm)
1415                crypto_free_skcipher(ctx->ftfm);
1416
1417        if (ctx->tweak)
1418                crypto_free_cipher(ctx->tweak);
1419
1420        qat_alg_skcipher_exit_tfm(tfm);
1421}
1422
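    /*
     * Algorithm templates exposed to the crypto API. Priority 4001 places
     * these implementations well above the software ones, so they win
     * algorithm selection whenever a QAT device is present.
     */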
1423static struct aead_alg qat_aeads[] = { {
1424        .base = {
1425                .cra_name = "authenc(hmac(sha1),cbc(aes))",
1426                .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1427                .cra_priority = 4001,
1428                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1429                .cra_blocksize = AES_BLOCK_SIZE,
1430                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1431                .cra_module = THIS_MODULE,
1432        },
1433        .init = qat_alg_aead_sha1_init,
1434        .exit = qat_alg_aead_exit,
1435        .setkey = qat_alg_aead_setkey,
1436        .decrypt = qat_alg_aead_dec,
1437        .encrypt = qat_alg_aead_enc,
1438        .ivsize = AES_BLOCK_SIZE,
1439        .maxauthsize = SHA1_DIGEST_SIZE,
1440}, {
1441        .base = {
1442                .cra_name = "authenc(hmac(sha256),cbc(aes))",
1443                .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1444                .cra_priority = 4001,
1445                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1446                .cra_blocksize = AES_BLOCK_SIZE,
1447                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1448                .cra_module = THIS_MODULE,
1449        },
1450        .init = qat_alg_aead_sha256_init,
1451        .exit = qat_alg_aead_exit,
1452        .setkey = qat_alg_aead_setkey,
1453        .decrypt = qat_alg_aead_dec,
1454        .encrypt = qat_alg_aead_enc,
1455        .ivsize = AES_BLOCK_SIZE,
1456        .maxauthsize = SHA256_DIGEST_SIZE,
1457}, {
1458        .base = {
1459                .cra_name = "authenc(hmac(sha512),cbc(aes))",
1460                .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1461                .cra_priority = 4001,
1462                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1463                .cra_blocksize = AES_BLOCK_SIZE,
1464                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1465                .cra_module = THIS_MODULE,
1466        },
1467        .init = qat_alg_aead_sha512_init,
1468        .exit = qat_alg_aead_exit,
1469        .setkey = qat_alg_aead_setkey,
1470        .decrypt = qat_alg_aead_dec,
1471        .encrypt = qat_alg_aead_enc,
1472        .ivsize = AES_BLOCK_SIZE,
1473        .maxauthsize = SHA512_DIGEST_SIZE,
1474} };
1475
1476static struct skcipher_alg qat_skciphers[] = { {
1477        .base.cra_name = "cbc(aes)",
1478        .base.cra_driver_name = "qat_aes_cbc",
1479        .base.cra_priority = 4001,
1480        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1481        .base.cra_blocksize = AES_BLOCK_SIZE,
1482        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1483        .base.cra_alignmask = 0,
1484        .base.cra_module = THIS_MODULE,
1485
1486        .init = qat_alg_skcipher_init_tfm,
1487        .exit = qat_alg_skcipher_exit_tfm,
1488        .setkey = qat_alg_skcipher_cbc_setkey,
1489        .decrypt = qat_alg_skcipher_blk_decrypt,
1490        .encrypt = qat_alg_skcipher_blk_encrypt,
1491        .min_keysize = AES_MIN_KEY_SIZE,
1492        .max_keysize = AES_MAX_KEY_SIZE,
1493        .ivsize = AES_BLOCK_SIZE,
1494}, {
1495        .base.cra_name = "ctr(aes)",
1496        .base.cra_driver_name = "qat_aes_ctr",
1497        .base.cra_priority = 4001,
1498        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1499        .base.cra_blocksize = 1,
1500        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1501        .base.cra_alignmask = 0,
1502        .base.cra_module = THIS_MODULE,
1503
1504        .init = qat_alg_skcipher_init_tfm,
1505        .exit = qat_alg_skcipher_exit_tfm,
1506        .setkey = qat_alg_skcipher_ctr_setkey,
1507        .decrypt = qat_alg_skcipher_decrypt,
1508        .encrypt = qat_alg_skcipher_encrypt,
1509        .min_keysize = AES_MIN_KEY_SIZE,
1510        .max_keysize = AES_MAX_KEY_SIZE,
1511        .ivsize = AES_BLOCK_SIZE,
1512}, {
1513        .base.cra_name = "xts(aes)",
1514        .base.cra_driver_name = "qat_aes_xts",
1515        .base.cra_priority = 4001,
1516        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1517                          CRYPTO_ALG_ALLOCATES_MEMORY,
1518        .base.cra_blocksize = AES_BLOCK_SIZE,
1519        .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1520        .base.cra_alignmask = 0,
1521        .base.cra_module = THIS_MODULE,
1522
1523        .init = qat_alg_skcipher_init_xts_tfm,
1524        .exit = qat_alg_skcipher_exit_xts_tfm,
1525        .setkey = qat_alg_skcipher_xts_setkey,
1526        .decrypt = qat_alg_skcipher_xts_decrypt,
1527        .encrypt = qat_alg_skcipher_xts_encrypt,
1528        .min_keysize = 2 * AES_MIN_KEY_SIZE,
1529        .max_keysize = 2 * AES_MAX_KEY_SIZE,
1530        .ivsize = AES_BLOCK_SIZE,
1531} };
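
    /*
     * Illustrative usage sketch -- not part of this driver. Consumers reach
     * the implementations above through the generic skcipher API; the names
     * and priorities in these templates decide whether QAT backs the tfm.
     * A minimal synchronous example, assuming key/buf/buflen exist (buflen
     * a multiple of AES_BLOCK_SIZE for CBC) and with error handling omitted:
     *
     *	struct crypto_skcipher *tfm;
     *	struct skcipher_request *req;
     *	DECLARE_CRYPTO_WAIT(wait);
     *	struct scatterlist sg;
     *	u8 iv[AES_BLOCK_SIZE] = { };
     *
     *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
     *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
     *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
     *	sg_init_one(&sg, buf, buflen);
     *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
     *				      crypto_req_done, &wait);
     *	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
     *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
     *	skcipher_request_free(req);
     *	crypto_free_skcipher(tfm);
     */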
1532
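    /*
     * Register the algorithm templates with the crypto API when the first
     * device comes up; subsequent devices only bump the refcount, all under
     * algs_lock.
     */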
1533int qat_algs_register(void)
1534{
1535        int ret = 0;
1536
1537        mutex_lock(&algs_lock);
1538        if (++active_devs != 1)
1539                goto unlock;
1540
1541        ret = crypto_register_skciphers(qat_skciphers,
1542                                        ARRAY_SIZE(qat_skciphers));
1543        if (ret)
1544                goto unlock;
1545
1546        ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1547        if (ret)
1548                goto unreg_algs;
1549
1550unlock:
1551        mutex_unlock(&algs_lock);
1552        return ret;
1553
1554unreg_algs:
1555        crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1556        goto unlock;
1557}
1558
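    /* drop the refcount and unregister once the last device is gone */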
1559void qat_algs_unregister(void)
1560{
1561        mutex_lock(&algs_lock);
1562        if (--active_devs != 0)
1563                goto unlock;
1564
1565        crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1566        crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1567
1568unlock:
1569        mutex_unlock(&algs_lock);
1570}
1571