// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit operational relative information */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

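/*
 * Layout of the type2 BD completion word (done_flag), as decoded in
 * sec_req_cb(): bit 0 is the "done" bit and bits [10:7] carry the
 * completion flag checked against SEC_SQE_CFLAG/SEC_SQE_AEAD_FLAG.
 */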
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
				 SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
				 SEC_PBUF_LEFT_SZ)
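
/*
 * Worked pbuf sizing, assuming SEC_IV_SIZE is 24 and QM_Q_DEPTH is 1024
 * (values from sec.h/qm.h at this kernel version) with 4 KiB pages:
 * SEC_PBUF_PKG = 512 + 24 + 64 * 2 = 664 bytes per request;
 * SEC_PBUF_NUM = 4096 / 664 = 6 packages per page;
 * SEC_PBUF_PAGE_NUM = 1024 / 6 = 170 full pages, leaving 4 packages
 * (SEC_PBUF_LEFT_SZ = 664 * 4 bytes) in the tail allocation.
 */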

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

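/*
 * Each request owns a 2 * SEC_MAX_MAC_LEN MAC area: the first half holds
 * the MAC computed by the hardware, while the second half is used below
 * as scratch for the MAC copied from the tail of the source scatterlist,
 * so that the two can be compared with memcmp().
 */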
static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err(SEC_CTX_DEV(ctx),
			"err_type[%d],done[%d],flag[%d]\n",
			req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

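/*
 * Queue one BD to the hardware. Returns -EINPROGRESS once the BD is in
 * flight. Past the fake_req_limit watermark a request either fails fast
 * with -EBUSY (caller did not set CRYPTO_TFM_REQ_MAY_BACKLOG) or is sent
 * anyway and parked on the software backlog, also reported as -EBUSY.
 * -ENOBUFS means the hardware queue itself rejected the BD.
 */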
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, pbuf is used for small packets (< 512 bytes)
 * to spare the cost of IOMMU mapping on the fast path.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * SEC_PBUF_PKG bundles the data pbuf, the IV and the out_mac for one
	 * request: <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM such packages and the qp_ctx needs
	 * QM_Q_DEPTH of them, so SEC_PBUF_PAGE_NUM full pages are carved up
	 * here, plus a tail allocation (SEC_PBUF_LEFT_SZ) for the packages
	 * that do not fill a whole page; hence the loop below runs to
	 * SEC_PBUF_PAGE_NUM inclusive.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}
	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_alg_res *res = qp_ctx->res;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;
alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);

	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);

	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as the fake request limit */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);

	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;
err_cipher_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

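/*
 * Each expansion below defines a thin setkey wrapper; for example,
 * GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) produces
 * sec_setkey_aes_ecb(), which is wired into the .setkey hook of the
 * matching entry in sec_skciphers[].
 */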
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

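/*
 * For pbuf-eligible requests the source scatterlist is copied into the
 * per-request pbuf so the hardware sees one contiguous DMA buffer; the
 * operation is implicitly in place (c_out_dma == c_in_dma) and the
 * result is copied back out in sec_cipher_pbuf_unmap().
 */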
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf,
					copy_size);

	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;

	if (!c_req->c_in_dma) {
		dev_err(dev, "fail to set pbuffer address!\n");
		return -ENOMEM;
	}

	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf,
					  copy_size);

	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = SEC_CTX_DEV(ctx);
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = blocksize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));

	return -EINVAL;
}
#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						 SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						 SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

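/*
 * In CBC mode the IV for the next request is the last ciphertext block,
 * so it is read back from the tail of the destination (encrypt) or the
 * source (decrypt) scatterlist.
 */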
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

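/*
 * Pop one backlogged request once the queue has drained below the
 * watermark; the callbacks below then complete it with -EINPROGRESS,
 * which by crypto API convention signals that a backlogged request has
 * moved into flight.
 */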
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* IV output at encrypt of CBC mode */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
					      -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}
	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy the hardware MAC to the tail of dst on encrypt */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To balance the load over queue pairs */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Output IV for CBC decrypt before an in-place op overwrites src */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
	    (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* As sending failed, restore the IV from the saved copy */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

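/*
 * The two op tables below share sec_bd_send() and sec_process(); they
 * differ only in how buffers are mapped, how the IV is transferred, how
 * the BD is filled and how completion is reported.
 */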
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = SEC_CTX_DEV(ctx);
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}

	dev_err(dev, "skcipher algorithm error!\n");
	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

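/*
 * Note: the sm4 entries below reuse the AES block and key sizes, since
 * SM4 also has a 128-bit block and a 128-bit key; the XTS key bounds are
 * doubled because an XTS key is two concatenated cipher keys.
 */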
static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

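/*
 * For decryption the trailing authsize bytes of cryptlen are the MAC
 * rather than ciphertext, so c_len is trimmed by authsize before the
 * block-size check below.
 */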
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
	    SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(void)
{
	int ret;
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
	return ret;
}

void sec_unregister_from_crypto(void)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}