#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

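/*
 * Scatter/gather descriptors in the layout consumed by the QAT firmware:
 * a flat array of {len, addr} entries behind a small header.  Both
 * structures are DMA-mapped and walked by the device, hence __packed
 * and the 64-byte alignment of the list.
 */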
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects the content descriptor pointers */
};

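/*
 * Return the size of the intermediate (state1) hash buffer for the given
 * auth algorithm, or -EFAULT for an unsupported one.  Used to locate the
 * outer-hash state, which is stored right after the inner one.
 */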
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

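/*
 * Precompute the HMAC inner and outer partial hashes for @auth_key and
 * store the exported hash states into the hardware auth block: the ipad
 * hash goes into state1 and the opad hash is placed after it at an
 * 8-byte-rounded offset.  Keys longer than the block size are first
 * hashed down, per the HMAC definition (RFC 2104).
 */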
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}

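/* Fill in the request header fields common to all LA bulk requests. */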
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

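/*
 * Build the encrypt-direction session: a cipher-then-hash content
 * descriptor (cipher block followed by the auth block in one DMA
 * region) plus the matching firmware request template.
 */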
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

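/*
 * Build the decrypt-direction session: a hash-then-cipher content
 * descriptor (auth block first, with the cipher block placed after the
 * two 8-byte-rounded hash states) plus the matching request template.
 */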
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

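/*
 * Shared setup for the ablkcipher content descriptor and request
 * template: copy the key, fill the common header and point the cipher
 * control block at the lone cipher slice.
 */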
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
		sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;

	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	/* CTR decryption is the same keystream operation as encryption,
	 * so reuse the encrypt configuration for CTR mode.
	 */
	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}

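/*
 * Map the supplied key length onto a hardware AES algorithm id.  XTS
 * keys carry two AES keys back to back, so the valid lengths double
 * (and there is no AES-192 XTS variant).
 */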
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

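/*
 * Split an authenc() key blob into cipher and auth keys, validate the
 * cipher key length, then build both the encrypt and decrypt sessions.
 */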
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst)
			return -EINVAL;

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd)
			return -ENOMEM;

		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd)
			goto out_free_enc;
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

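/*
 * Unmap and free the source (and, for out-of-place requests, the
 * destination) firmware buffer lists built by qat_alg_sgl_to_bufl().
 */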
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

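/*
 * DMA-map the request scatterlists and describe them in qat_alg_buf_list
 * entries the firmware can walk.  For out-of-place requests a second
 * list is built for the destination; in-place requests reuse the source
 * list.  On error, everything mapped so far is unwound.
 */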
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	sg_nctr = 0;
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = 0; i < n; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

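/*
 * Completion callbacks, run from the response ring handler: free the
 * DMA buffer lists, translate the firmware status and complete the
 * crypto API request.  A failed AEAD request means the hardware auth
 * comparison did not pass, hence -EBADMSG.
 */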
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
		(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

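/*
 * AEAD decrypt: the cipher region excludes the appended digest, while
 * the auth region covers the associated data plus the ciphertext so the
 * hardware can verify the digest (CMP_AUTH_RES is set in the session).
 */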
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digest_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digest_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

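/*
 * Allocate (or re-zero, on rekey) the DMA-coherent content descriptors
 * and program both cipher directions for the requested mode.  The
 * spinlock serialises concurrent setkey callers on the same tfm.
 */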
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}

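/*
 * Fill a copy of the prebuilt request template with the buffer list
 * addresses and IV, then submit it to the symmetric tx ring, retrying
 * briefly if the ring is full before giving up with -EBUSY.
 */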
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

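/*
 * tfm init/exit: allocate the software shash used for the HMAC
 * precomputes on init; on exit, zero and free the DMA-coherent content
 * descriptors and drop the crypto instance reference.
 */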
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

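/* Algorithm templates registered with the crypto API. */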
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };

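/*
 * Register the algorithm templates when the first device comes up and
 * unregister them when the last one goes away; active_devs keeps the
 * device refcount under algs_lock.
 */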
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags =
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}
1313