linux/drivers/crypto/qat/qat_common/qat_algs.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

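/*
 * Cipher config words for the content descriptors.  Encrypt uses the
 * key exactly as supplied (no convert); decrypt asks the hardware to
 * convert the key into its decryption form before use.
 */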
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
} __packed;

struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

struct qat_alg_aead_ctx {
        struct qat_alg_cd *enc_cd;
        struct qat_alg_cd *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        uint8_t salt[AES_BLOCK_SIZE];
        spinlock_t lock;        /* protects qat_alg_aead_ctx struct */
};

struct qat_alg_ablkcipher_ctx {
        struct icp_qat_hw_cipher_algo_blk *enc_cd;
        struct icp_qat_hw_cipher_algo_blk *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}

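/*
 * Precompute the inner and outer HMAC states for the content
 * descriptor: hash the XOR-padded key (ipad/opad), export the partial
 * hash state and store it byte-swapped in hash->sha.state1, so the
 * hardware can resume from it instead of rehashing the key on every
 * request.
 */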
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_aead_ctx *ctx,
                                  const uint8_t *auth_key,
                                  unsigned int auth_keylen)
{
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        struct sha1_state sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        char ipad[block_size];
        char opad[block_size];
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        shash->tfm = ctx->hash_tfm;
        shash->flags = 0x0;

        if (auth_keylen > block_size) {
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, ipad);
                if (ret)
                        return ret;

                memcpy(opad, ipad, digest_size);
        } else {
                memcpy(ipad, auth_key, auth_keylen);
                memcpy(opad, auth_key, auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ipad + i;
                char *opad_ptr = opad + i;
                *ipad_ptr ^= 0x36;
                *opad_ptr ^= 0x5C;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }
        memzero_explicit(ipad, block_size);
        memzero_explicit(opad, block_size);
        return 0;
}

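/*
 * Fill in the request header fields common to all firmware requests
 * built here: LA service, 64-bit CD pointers, SGL source/destination,
 * 16-byte IV field, no partials and no state updates.
 */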
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

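/*
 * Build the encrypt session: a cipher-then-hash content descriptor
 * (cipher block first, HMAC state placed right after the cipher key)
 * plus the matching firmware request template.
 */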
static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);

        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}

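/*
 * Build the decrypt session, the mirror image of the encrypt one: the
 * hash block comes first and the cipher config/key sit after the two
 * rounded-up hash states, and the slice chain runs auth before cipher
 * so the hardware verifies the digest.
 */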
static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);

        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}

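/*
 * Common part of the ablkcipher content descriptor and request
 * template: plain cipher command, key copied into the CD, a single
 * cipher slice writing straight back to DRAM.
 */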
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
                                        struct icp_qat_fw_la_bulk_req *req,
                                        struct icp_qat_hw_cipher_algo_blk *cd,
                                        const uint8_t *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

        memcpy(cd->aes.key, key, keylen);
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

        if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
        else
                /* CTR decryption uses the same keystream as encryption */
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
}

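/*
 * Map the key length onto a hardware AES algorithm id.  XTS keys are
 * twice the AES key size because they carry the tweak key as well.
 */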
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
                switch (key_len) {
                case AES_KEYSIZE_128:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_192:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                        break;
                case AES_KEYSIZE_256:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (key_len) {
                case AES_KEYSIZE_128 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_256 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct crypto_authenc_keys keys;
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        int alg;

        if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
                return -EFAULT;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;

        if (qat_alg_aead_init_enc_session(ctx, alg, &keys, mode))
                goto error;

        if (qat_alg_aead_init_dec_session(ctx, alg, &keys, mode))
                goto error;

        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
error:
        return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
                                            const uint8_t *key,
                                            unsigned int keylen,
                                            int mode)
{
        int alg;

        if (qat_alg_validate_key(keylen, &alg, mode))
                goto bad_key;

        qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
        qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

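/*
 * setkey with lazy instance binding: on the first key, pick a crypto
 * instance on the current node and allocate the DMA-coherent content
 * descriptors; on rekey, just wipe the old descriptors and request
 * templates before the sessions are rebuilt.
 */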
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_aead_init_sessions(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CBC_MODE))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

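/*
 * Unmap and free the firmware buffer lists built by
 * qat_alg_sgl_to_bufl(), including the bounced IV buffer.  For out of
 * place requests only the data buffers of the output list are
 * unmapped here; the assoc and IV mappings are shared with the input
 * list and already released above.
 */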
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        size_t sz_out = qat_req->buf.sz_out;
        int i;

        kfree(qat_req->buf.iv);
        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* If out of place operation, dma unmap only data */
                int bufless = blout->num_bufs - blout->num_mapped_bufs;

                for (i = bufless; i < blout->num_bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
                kfree(blout);
        }
}

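/*
 * Flatten the assoc, IV and data scatterlists into the flat buffer
 * list format the firmware expects, DMA-map every element plus the
 * list itself, and stash the mappings in the request so the callback
 * can undo them.  The IV is bounced into its own buffer so it can sit
 * in the list between the assoc data and the payload.
 */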
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *assoc, int assoclen,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout, uint8_t *iv,
                               uint8_t ivlen,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, bufs = 0, sg_nctr = 0;
        int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
                        ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
        u8 *civ = NULL;

        if (unlikely(!n))
                return -EINVAL;

        bufl = kzalloc_node(sz, GFP_ATOMIC,
                            dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err_in;

        for_each_sg(assoc, sg, assoc_n, i) {
                if (!sg->length)
                        continue;

                if (assoclen <= 0)
                        break;

                bufl->bufers[bufs].addr =
                        dma_map_single(dev, sg_virt(sg),
                                       min_t(int, assoclen, sg->length),
                                       DMA_BIDIRECTIONAL);
                bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
                if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
                        goto err_in;
                bufs++;
                assoclen -= sg->length;
        }

        if (ivlen) {
                civ = kmalloc_node(ivlen, GFP_ATOMIC,
                                   dev_to_node(&GET_DEV(inst->accel_dev)));
                if (!civ)
                        goto err_in;
                memcpy(civ, iv, ivlen);

                bufl->bufers[bufs].addr = dma_map_single(dev, civ, ivlen,
                                                         DMA_BIDIRECTIONAL);
                bufl->bufers[bufs].len = ivlen;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
                        goto err_in;
                bufs++;
                qat_req->buf.copyback = 1;
        }

        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr + bufs;

                if (!sg->length)
                        continue;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err_in;
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr + bufs;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        qat_req->buf.initial_iv = iv;
        qat_req->buf.iv = civ;
        qat_req->buf.ivlen = ivlen;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                n = sg_nents(sglout);
                sz_out = sizeof(struct qat_alg_buf_list) +
                        ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err_in;
                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err_out;
                bufers = buflout->bufers;
                /* For out of place operation, dma map only the data and
                 * reuse the assoc and iv mappings */
                for (i = 0; i < bufs; i++) {
                        bufers[i].len = bufl->bufers[i].len;
                        bufers[i].addr = bufl->bufers[i].addr;
                }
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr + bufs;

                        if (!sg->length)
                                continue;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err_out;
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
                buflout->num_bufs = sg_nctr + bufs;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
                qat_req->buf.sz_out = 0;
        }
        return 0;

err_out:
        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                        dma_unmap_single(dev, buflout->bufers[i].addr,
                                         buflout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
        if (!dma_mapping_error(dev, bloutp))
                dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
        kfree(buflout);

err_in:
        n = sg_nents(sgl);
        for (i = 0; i < n + bufs; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);

        dev_err(dev, "Failed to map buf for dma\n");
        return -ENOMEM;
}

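/*
 * Completion handlers, invoked from qat_alg_callback() when a
 * response arrives.  They release the DMA mappings and complete the
 * crypto request, reporting -EBADMSG/-EINVAL when the firmware flags
 * a failure (e.g. an authentication mismatch on AEAD decrypt).
 */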
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                  struct qat_crypto_request *qat_req)
{
        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->aead_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        if (qat_req->buf.copyback)
                memcpy(qat_req->buf.initial_iv, qat_req->buf.iv,
                       qat_req->buf.ivlen);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                        struct qat_crypto_request *qat_req)
{
        struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct ablkcipher_request *areq = qat_req->ablkcipher_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EINVAL;
        areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                                (void *)(__force long)qat_resp->opaque_data;

        qat_req->cb(qat_resp, qat_req);
}

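/*
 * Submission pattern shared by all the encrypt/decrypt entry points
 * below: map the buffers, copy the prebuilt request template, fill in
 * the per-request lengths and IV, then post the message, retrying up
 * to ten times before giving up with -EBUSY.
 */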
static int qat_alg_aead_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
                                  areq->src, areq->dst, areq->iv,
                                  AES_BLOCK_SIZE, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = areq->cryptlen - digest_size;
        cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen +
                                cipher_param->cipher_length + AES_BLOCK_SIZE;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
                                     int enc_iv)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
                                  areq->src, areq->dst, iv, AES_BLOCK_SIZE,
                                  qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        if (enc_iv) {
                cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
                cipher_param->cipher_offset = areq->assoclen;
        } else {
                memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
                cipher_param->cipher_length = areq->cryptlen;
                cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
        }
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
        return qat_alg_aead_enc_internal(areq, areq->iv, 0);
}

static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        __be64 seq;

        memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
        seq = cpu_to_be64(req->seq);
        memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
               &seq, sizeof(uint64_t));
        return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
}

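/*
 * Same two-phase setkey as the AEAD path: bind an instance and
 * allocate the content descriptors on the first key, wipe and rebuild
 * on rekey.
 */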
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                     const u8 *key, unsigned int keylen,
                                     int mode)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
                                  NULL, 0, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
                                  NULL, 0, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

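/*
 * tfm init/exit.  The shash used for the HMAC precomputes is
 * allocated up front; the crypto instance and the DMA descriptors are
 * deferred to the first setkey and zeroed before being freed on exit.
 */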
static int qat_alg_aead_init(struct crypto_tfm *tfm,
                             enum icp_qat_hw_auth_algo hash,
                             const char *hash_name)
{
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
                return -EFAULT;
        spin_lock_init(&ctx->lock);
        ctx->qat_hash_alg = hash;
        ctx->tfm = tfm;
        crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
                                sizeof(struct qat_crypto_request));
        return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!IS_ERR(ctx->hash_tfm))
                crypto_free_shash(ctx->hash_tfm);

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        spin_lock_init(&ctx->lock);
        tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static struct crypto_alg qat_algs[] = { {
        .cra_name = "authenc(hmac(sha1),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha1",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_aead_sha1_init,
        .cra_exit = qat_alg_aead_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_aead_setkey,
                        .decrypt = qat_alg_aead_dec,
                        .encrypt = qat_alg_aead_enc,
                        .givencrypt = qat_alg_aead_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
        },
}, {
        .cra_name = "authenc(hmac(sha256),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha256",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_aead_sha256_init,
        .cra_exit = qat_alg_aead_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_aead_setkey,
                        .decrypt = qat_alg_aead_dec,
                        .encrypt = qat_alg_aead_enc,
                        .givencrypt = qat_alg_aead_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
        },
}, {
        .cra_name = "authenc(hmac(sha512),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha512",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_aead_sha512_init,
        .cra_exit = qat_alg_aead_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_aead_setkey,
                        .decrypt = qat_alg_aead_dec,
                        .encrypt = qat_alg_aead_enc,
                        .givencrypt = qat_alg_aead_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
        },
}, {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "qat_aes_cbc",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_cbc_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "qat_aes_ctr",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_ctr_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "xts(aes)",
        .cra_driver_name = "qat_aes_xts",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
} };

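/*
 * Registration is refcounted across accelerator devices: the
 * algorithms (and the default RNG used for the IV salts) are
 * registered when the first device comes up and unregistered when the
 * last one goes away.
 */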
int qat_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++active_devs == 1) {
                int i;

                crypto_get_default_rng();

                for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
                        qat_algs[i].cra_flags =
                                (qat_algs[i].cra_type == &crypto_aead_type) ?
                                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
                                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

                ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
                if (ret < 0)
                        crypto_put_default_rng();
        }
        mutex_unlock(&algs_lock);
        return ret;
}

void qat_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs == 0) {
                crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
                crypto_put_default_rng();
        }
        mutex_unlock(&algs_lock);
}