// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016 Broadcom
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha3.h>

#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* ================= Device Structure ================== */

struct device_private iproc_priv;

/* ==================== Parameters ===================== */

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * The priority these algos are registered with determines whether the kernel
 * crypto API chooses this driver's implementation over others (such as the
 * generic software implementations) for a given algorithm.
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");

/*
 * A type 3 BCM header, expected to precede the SPU header for SPU-M.
 * Bits 3 and 4 in the first byte encode the channel number (the dma
 * ringset).
 * 0x60 - ring 0
 * 0x68 - ring 1
 * 0x70 - ring 2
 * 0x78 - ring 3
 */
static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };

/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
 */
#define BCM_HDR_LEN  iproc_priv.bcm_hdr_len

/* min and max time to sleep before retrying when mbox queue is full. usec */
#define MBOX_SLEEP_MIN  800
#define MBOX_SLEEP_MAX  1000

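/**
 * select_channel() - Select a SPU channel to handle a crypto request. Selects
 * channel in round robin order.
 *
 * Return:  channel index
 */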
static u8 select_channel(void)
{
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}

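/**
 * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an skcipher request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @chunksize:	Number of bytes of response data expected
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * before the next time it is used.
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */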
static int
spu_skcipher_rx_sg_create(struct brcm_message *mssg,
			  struct iproc_reqctx_s *rctx,
			  u8 rx_frag_num,
			  unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	/* Copy in each dst sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		/* Add buffer to catch 260-byte SUPDT field for RC4 */
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

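/**
 * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
 * send a SPU request message for an skcipher request. Includes SPU message
 * headers and the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @chunksize:	Number of bytes of request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * before the next time it is used.
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */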
static int
spu_skcipher_tx_sg_create(struct brcm_message *mssg,
			  struct iproc_reqctx_s *rctx,
			  u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of data written */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	/* Copy in each src sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}

static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	/* Check error returned by mailbox controller */
	err = mssg->error;
	if (unlikely(err < 0)) {
		dev_err(dev, "message error %d", err);
		/* Signal txdone for mailbox channel */
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}

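/**
 * handle_skcipher_req() - Submit as much of a skcipher request as fits in a
 * single SPU request message, starting at the current position in the request
 * data.
 * @rctx:	Crypto request context
 *
 * This may be called on the crypto API thread, or, when a request is so large
 * it must be broken into chunks, on the thread used to invoke the response
 * callback. When requests are broken into chunks, we assume subsequent chunks
 * are not sent until previous chunks complete. Each chunk must be submitted as
 * a separate mailbox message.
 *
 * Return: -EINPROGRESS: request has been accepted and result will be returned
 *			 asynchronously
 *         Any other value indicates an error
 */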
static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct skcipher_request *req =
	    container_of(areq, struct skcipher_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct spu_cipher_parms cipher_parms;
	int err;
	unsigned int chunksize;	/* Num bytes of request to submit */
	int remaining;		/* Bytes of request still to process */
	int chunk_start;	/* Beginning of data for current SPU msg */

	/* IV or ctr value to use in this SPU msg */
	u8 local_iv_ctr[MAX_IV_SIZE];
	u32 stat_pad_len;	/* num bytes to align status field */
	u32 pad_len;		/* total length of all padding */
	bool update_key = false;
	struct brcm_message *mssg;	/* mailbox message */

	/* number of entries in src and dst sg in mailbox message. */
	u8 rx_frag_num = 2;	/* response header and STATUS */
	u8 tx_frag_num = 1;	/* request header */

	flow_log("%s\n", __func__);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.iv_buf = local_iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	remaining = rctx->total_todo - chunk_start;

	/* determine the chunk we are breaking off and update the indexes */
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (remaining > ctx->max_payload))
		chunksize = ctx->max_payload;
	else
		chunksize = remaining;

	rctx->src_sent += chunksize;
	rctx->total_sent = rctx->src_sent;

	/* Count number of sg entries to be included in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);

	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
	    rctx->is_encrypt && chunk_start)
		/*
		 * Encrypting non-first chunk. Copy last block of
		 * previous result to IV for this chunk.
		 */
		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
				    rctx->iv_ctr_len,
				    chunk_start - rctx->iv_ctr_len);

	if (rctx->iv_ctr_len) {
		/* get our local copy of the iv */
		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
				 rctx->iv_ctr_len);

		/* generate the next IV if possible */
		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
		    !rctx->is_encrypt) {
			/*
			 * CBC Decrypt: next IV is the last ciphertext block in
			 * this chunk
			 */
			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
					    rctx->iv_ctr_len,
					    rctx->src_sent - rctx->iv_ctr_len);
		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
			/*
			 * The SPU hardware increments the counter once for
			 * each AES block of 16 bytes. So update the counter
			 * for the next chunk, if there is one. Note that for
			 * this chunk, the counter has already been copied to
			 * local_iv_ctr. We can assume a block size of 16,
			 * because we only support CTR mode for AES, not for
			 * any other cipher alg.
			 */
			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
		}
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
		rx_frag_num++;
		if (chunk_start) {
			/*
			 * for non-first RC4 chunks, use SUPDT from previous
			 * response as key for this chunk.
			 */
			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_UPDT;
		} else if (!rctx->is_encrypt) {
			/*
			 * First RC4 chunk. For decrypt, key in pre-built msg
			 * header may have been changed if encrypt required
			 * multiple chunks. So revert the key to the
			 * ctx->enckey value.
			 */
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_INIT;
		}
	}

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("sent:%u start:%u remains:%u size:%u\n",
		 rctx->src_sent, chunk_start, remaining, chunksize);

	/* Copy SPU header template created at setkey time */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));

	/*
	 * Pass SUPDT field as key. Key field in finish() call is only used
	 * when update_key has been set above for RC4. Will be ignored in
	 * all other cases.
	 */
	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
				   &cipher_parms, update_key, chunksize);

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
				     0, ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      ctx->spu_req_hdr_len);
	packet_log("payload:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to contain the SPU response message */
	rx_frag_num += rctx->dst_nents;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		rx_frag_num++;	/* extra sg to insert tweak */

	err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
					stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		tx_frag_num++;	/* extra sg to insert tweak */

	err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
					pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

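/**
 * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
 * total received count for the request and updates global stats.
 * @rctx:	Crypto request context
 */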
static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct skcipher_request *req = skcipher_request_cast(areq);
#endif
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);

	/*
	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
	 * encrypted tweak ("i") value; we don't count those.
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload() &&
	    (payload_len >= SPU_XTS_TWEAK_SIZE))
		payload_len -= SPU_XTS_TWEAK_SIZE;

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	flow_log("%s() offset: %u, bd_len: %u BD:\n",
		 __func__, rctx->total_received, payload_len);

	dump_sg(req->dst, rctx->total_received, payload_len);
	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
			    SPU_SUPDT_LEN);

	rctx->total_received += payload_len;
	if (rctx->total_received == rctx->total_todo) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
		atomic_inc(
		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
	}
}

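/**
 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ahash request.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @digestsize: length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * before the next time it is used.
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */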
static int
spu_ahash_rx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 rx_frag_num, unsigned int digestsize,
		       u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* Space for digest */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
	return 0;
}

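/**
 * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send
 * a SPU request message for an ahash request. Includes SPU message headers and
 * the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length in bytes of SPU message header
 * @hash_carry_len: Number of bytes of data carried over from previous req
 * @new_data_len: Number of bytes of new request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * before the next time it is used.
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */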
static int
spu_ahash_tx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 tx_frag_num,
		       u32 spu_hdr_len,
		       unsigned int hash_carry_len,
		       unsigned int new_data_len, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	u32 datalen;		/* Number of bytes of data written */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (hash_carry_len)
		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);

	if (new_data_len) {
		/* Copy in each src sg entry from request, up to chunksize */
		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, new_data_len);
		if (datalen < new_data_len) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}

	return 0;
}

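/**
 * handle_ahash_req() - Process an asynchronous hash request from the crypto
 * API.
 * @rctx:  Crypto request context
 *
 * Builds a SPU request message embedded in a mailbox message and submits the
 * mailbox message on a SPU mailbox channel. Hash data must be submitted to
 * the hardware in blocksize-sized chunks; for a non-final request, any
 * trailing partial block is parked in the request context's hash carry and
 * prepended to the data of the next request.
 *
 * Return: -EINPROGRESS: request has been submitted to SPU and response will
 *			 be returned asynchronously
 *         -EAGAIN:      non-final request included too little data to submit
 *			 to the SPU; the data was saved in the hash carry
 *			 buffer instead
 *         other:        an error code
 */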
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	/* number of bytes still to be hashed in this req */
	unsigned int nbytes_to_hash = 0;
	int err;
	unsigned int chunksize = 0;	/* length of hash carry + new data */
	/*
	 * length of new data, not from hash carry, to be submitted in
	 * this hw request
	 */
	unsigned int new_data_len;

	unsigned int __maybe_unused chunk_start = 0;
	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
	 * rx always includes a buffer to catch digest and STATUS.
	 */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;

	/*
	 * For hash algorithms below assignment looks bit odd but
	 * it's needed for AES-XCBC and AES-CMAC hash algorithms
	 * to differentiate between 128, 192, 256 bit key values.
	 * Based on the key values, hash algorithm is selected.
	 * For example for 128 bit key, hash algorithm is AES-128.
	 */
	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;

	/*
	 * Compute the amount remaining to hash. This may include data
	 * carried over from previous requests.
	 */
	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * If this is not a final request and the request data is not a
	 * multiple of a full block, then simply park the extra data and
	 * prefix it to the data for the next request.
	 */
	if (!rctx->is_final) {
		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
		u16 new_len;	/* len of data to add to hash carry */

		rem = chunksize % blocksize;	/* remainder */
		if (rem) {
			/* chunksize not a multiple of blocksize */
			chunksize -= rem;
			if (chunksize == 0) {
				/* Don't have a full block to submit to hw */
				new_len = rem - rctx->hash_carry_len;
				sg_copy_part_to_buf(req->src, dest, new_len,
						    rctx->src_sent);
				rctx->hash_carry_len = rem;
				flow_log("Exiting with hash carry len: %u\n",
					 rctx->hash_carry_len);
				packet_dump(" buf: ",
					    rctx->hash_carry,
					    rctx->hash_carry_len);
				return -EAGAIN;
			}
		}
	}

	/* if we have hash carry, then prefix it to the data in this request */
	local_nbuf = rctx->hash_carry_len;
	rctx->hash_carry_len = 0;
	if (local_nbuf)
		tx_frag_num++;
	new_data_len = chunksize - local_nbuf;

	/* Count number of sg entries to be used in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
				       new_data_len);

	/* AES hashing keeps key size in type field, so need to copy it here */
	if (hash_parms.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)cipher_parms.type;
	else
		hash_parms.type = spu->spu_hash_type(rctx->total_sent);

	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
					  hash_parms.type);
	hash_parms.digestsize = digestsize;

	/* update the indexes */
	rctx->total_sent += chunksize;
	/* if you sent a prebuf then that wasn't from this req->src */
	rctx->src_sent += new_data_len;

	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
							   hash_parms.mode,
							   chunksize,
							   blocksize);

	/*
	 * If a non-first chunk, then include the digest returned from the
	 * previous chunk so that hw can add to it (except for AES types).
	 */
	if ((hash_parms.type == HASH_TYPE_UPDT) &&
	    (hash_parms.alg != HASH_ALG_AES)) {
		hash_parms.key_buf = rctx->incr_hash;
		hash_parms.key_len = digestsize;
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s() final: %u nbuf: %u ",
		 __func__, rctx->is_final, local_nbuf);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);

	/* Prepend SPU header with BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	hash_parms.prebuf_len = local_nbuf;
	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN,
					      &req_opts, &cipher_parms,
					      &hash_parms, &aead_parms,
					      new_data_len);

	if (spu_hdr_len == 0) {
		pr_err("Failed to create SPU request header\n");
		return -EFAULT;
	}

	/*
	 * Determine total length of padding required. Put all padding in one
	 * buffer.
	 */
	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
				   0, 0, hash_parms.pad_len);
	if (spu->spu_tx_status_len())
		stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
				     hash_parms.pad_len, ctx->auth.alg,
				     ctx->auth.mode, rctx->total_sent,
				     stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
	flow_log("Data:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to contain the SPU response message */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

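/**
 * spu_hmac_outer_hash() - Request synchronous software compute of the outer
 * hash for an HMAC request, using the hash of the inner hash already stored
 * in the request's result buffer.
 * @req:  hash request from the crypto API
 * @ctx:  the session context
 *
 * Return: 0 if the synchronous hash operation was successful
 *         -EINVAL if the hash algo is unrecognized
 */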
static int spu_hmac_outer_hash(struct ahash_request *req,
			       struct iproc_ctx_s *ctx)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	int rc;

	switch (ctx->auth.alg) {
	case HASH_ALG_MD5:
		rc = do_shash("md5", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA1:
		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA224:
		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA256:
		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA384:
		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA512:
		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	default:
		pr_err("%s() Error : unknown hmac type\n", __func__);
		rc = -EINVAL;
	}
	return rc;
}

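/**
 * ahash_req_done() - Process a hash result from the SPU hardware.
 * @rctx: Crypto request context
 *
 * Return: 0 if successful
 *         < 0 if an error
 */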
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		/*
		 * byte swap the output from the UPDT function to network byte
		 * order
		 */
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump(" digest ", req->result, ctx->digestsize);

	/* if this an HMAC then do the outer hash */
	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump(" hmac: ", req->result, ctx->digestsize);
	}

	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}

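/**
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Checks if the entire crypto API request has been processed, and if so,
 * invokes post processing on the result.
 * @rctx: Crypto request context
 */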
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
#endif
	/*
	 * Save hash to use as input to next op if incremental. Might be copying
	 * too much, but that's easier than figuring out actual digest size here
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}

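/**
 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an AEAD request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg:	mailbox message containing the receive sg
 * @req:	Crypto API request
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @assoc_len:	Length of associated data included in the crypto request
 * @ret_iv_len: Length of IV returned in response
 * @resp_len:	Number of bytes of response data expected to be written to
 *		dst buffer from crypto API
 * @digestsize: Length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * before the next time it is used.
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */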
static int spu_aead_rx_sg_create(struct brcm_message *mssg,
				 struct aead_request *req,
				 struct iproc_reqctx_s *rctx,
				 u8 rx_frag_num,
				 unsigned int assoc_len,
				 u32 ret_iv_len, unsigned int resp_len,
				 unsigned int digestsize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */
	u32 assoc_buf_len;
	u8 data_padlen = 0;

	if (ctx->is_rfc4543) {
		/* RFC4543: only pad after data, not after AAD */
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       assoc_len + resp_len);
		assoc_buf_len = assoc_len;
	} else {
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       resp_len);
		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
							assoc_len, ret_iv_len,
							rctx->is_encrypt);
	}

	if (ctx->cipher.mode == CIPHER_MODE_CCM)
		/* ICV (after data) must be in the next 32-bit word for CCM */
		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
							 resp_len +
							 data_padlen);

	if (data_padlen)
		/* have to catch gcm pad in separate buffer */
		rx_frag_num++;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	if (assoc_buf_len) {
		/*
		 * Don't write directly to req->dst, because SPU may pad the
		 * assoc data in the response
		 */
		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
	}

	if (resp_len) {
		/*
		 * Copy in each dst sg entry from request, up to chunksize.
		 * dst sg catches just the data. digest caught in separate buf.
		 */
		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
					 rctx->dst_nents, resp_len);
		if (datalen < resp_len) {
			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
			       __func__, resp_len, datalen);
			return -EFAULT;
		}
	}

	/* If GCM/CCM data is padded, catch padding in separate buffer */
	if (data_padlen) {
		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
	}

	/* Always catch ICV in separate buffer */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	flow_log("stat_pad_len %u\n", stat_pad_len);
	if (stat_pad_len) {
		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
	}

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

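/**
 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send
 * a SPU request message for an AEAD request. Includes SPU message headers and
 * the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length of SPU message header in bytes
 * @assoc:	crypto API associated data scatterlist
 * @assoc_len:	length of associated data
 * @assoc_nents: number of scatterlist entries containing assoc data
 * @aead_iv_len: length of AEAD IV, if included
 * @chunksize:	Number of bytes of request data
 * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
 * @pad_len:	Number of pad bytes
 * @incl_icv:	If true, write separate ICV buffer after data and
 *		any padding
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * before the next time it is used.
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */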
static int spu_aead_tx_sg_create(struct brcm_message *mssg,
				 struct iproc_reqctx_s *rctx,
				 u8 tx_frag_num,
				 u32 spu_hdr_len,
				 struct scatterlist *assoc,
				 unsigned int assoc_len,
				 int assoc_nents,
				 unsigned int aead_iv_len,
				 unsigned int chunksize,
				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct scatterlist *assoc_sg = assoc;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of data to write */
	u32 written;		/* Number of bytes of data written */
	u32 assoc_offset = 0;
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (assoc_len) {
		/* Copy in each associated data sg entry from request */
		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
					 assoc_nents, assoc_len);
		if (written < assoc_len) {
			pr_err("%s(): failed to copy assoc sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (aead_iv_len)
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);

	if (aad_pad_len) {
		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
	}

	datalen = chunksize;
	if ((chunksize > ctx->digestsize) && incl_icv)
		datalen -= ctx->digestsize;
	if (datalen) {
		/* Copy in each src sg entry from request, up to datalen */
		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, datalen);
		if (written < datalen) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len) {
		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
	}

	if (incl_icv)
		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}

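/**
 * handle_aead_req() - Submit a SPU request message for the next chunk of the
 * current AEAD request.
 * @rctx:  Crypto request context
 *
 * Unlike other operation types, the entire AEAD request is processed in a
 * single SPU request message (chunksize is set to rctx->total_todo below), so
 * the caller must ensure the request fits in one SPU message. digestsize is
 * always ctx->digestsize here.
 *
 * Return: -EINPROGRESS: crypto request has been accepted and result will be
 *			 returned asynchronously
 *         Any other value indicates an error
 */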
static int handle_aead_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;
	unsigned int chunksize;
	unsigned int resp_len;
	u32 spu_hdr_len;
	u32 db_size;
	u32 stat_pad_len;
	u32 pad_len;
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	int assoc_nents = 0;
	bool incl_icv = false;
	unsigned int digestsize = ctx->digestsize;

	/* number of entries in src and dst sg. Always includes SPU msg
	 * header and STATUS.
	 */
	u8 rx_frag_num = 2;
	u8 tx_frag_num = 1;

	/* doing the whole thing at once */
	chunksize = rctx->total_todo;

	flow_log("%s: chunksize %u\n", __func__, chunksize);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.is_inbound = !(rctx->is_encrypt);
	req_opts.auth_first = ctx->auth_first;
	req_opts.is_aead = true;
	req_opts.is_esp = ctx->is_esp;

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;
	hash_parms.digestsize = digestsize;

	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
		hash_parms.key_len = SHA224_DIGEST_SIZE;

	aead_parms.assoc_size = req->assoclen;
	if (ctx->is_esp && !ctx->is_rfc4543) {
		/*
		 * 8-byte IV is included assoc data in request. SPU2
		 * expects AAD to include just SPI and seqno. So
		 * subtract off the IV len.
		 */
		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;

		if (rctx->is_encrypt) {
			aead_parms.return_iv = true;
			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
		}
	} else {
		aead_parms.ret_iv_len = 0;
	}

	/*
	 * Count number of sg entries from the crypto API request that are to
	 * be included in this mailbox message. For dst sg, don't count space
	 * for digest. Digest gets caught in a separate buffer and copied back
	 * to dst sg when processing response.
	 */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
	if (aead_parms.assoc_size)
		assoc_nents = spu_sg_count(rctx->assoc, 0,
					   aead_parms.assoc_size);

	mssg = &rctx->mb_mssg;

	rctx->total_sent = chunksize;
	rctx->src_sent = chunksize;
	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len,
				    rctx->is_encrypt))
		rx_frag_num++;

	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
						rctx->iv_ctr_len);

	if (ctx->auth.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)ctx->cipher_type;

	/* General case AAD padding (CCM and RFC4543 special cases below) */
	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						 aead_parms.assoc_size);

	/* General case data padding (CCM decrypt special case below) */
	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							   chunksize);

	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		/*
		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
		 * 128-bit aligned
		 */
		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
					 ctx->cipher.mode,
					 aead_parms.assoc_size + 2);

		/*
		 * When decrypting CCM, the data to pad excludes the ICV,
		 * which is tacked on to the end of the chunk
		 */
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len =
				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							 chunksize - digestsize);

		/* CCM also requires software to rewrite portions of IV: */
		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
				       chunksize, rctx->is_encrypt,
				       ctx->is_esp);
	}

	if (ctx->is_rfc4543) {
		/*
		 * RFC4543: data is included in AAD, so don't pad after AAD
		 * and pad data based on both AAD + data size
		 */
		aead_parms.aad_pad_len = 0;
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize -
					digestsize);
		else
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize);

		req_opts.is_rfc4543 = true;
	}

	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
		incl_icv = true;
		tx_frag_num++;
		/* Copy ICV from end of src scatterlist to digest buf */
		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
				    req->assoclen + rctx->total_sent -
				    digestsize);
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);

	/* Prepend SPU header with BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN, &req_opts,
					      &cipher_parms, &hash_parms,
					      &aead_parms, chunksize);

	/* Determine total length of padding. Put all padding in one buffer. */
	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
				   chunksize, aead_parms.aad_pad_len,
				   aead_parms.data_pad_len, 0);

	stat_pad_len = spu->spu_wordalign_padlen(db_size);

	if (stat_pad_len)
		rx_frag_num++;
	pad_len = aead_parms.data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
				     aead_parms.data_pad_len, 0,
				     ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
	packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
	packet_log("BD:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to contain the SPU response message */
	rx_frag_num += rctx->dst_nents;
	resp_len = chunksize;

	/*
	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
	 * returns entire digest. Then we just copy the truncated digest.
	 */
	rx_frag_num++;

	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
		/*
		 * Input is ciphertext plus ICV, but ICV not incl
		 * in output.
		 */
		resp_len -= ctx->digestsize;
		if (resp_len == 0)
			/* no rx frags to catch output data */
			rx_frag_num -= rctx->dst_nents;
	}

	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len, resp_len, digestsize,
				    stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	tx_frag_num += assoc_nents;
	if (aead_parms.aad_pad_len)
		tx_frag_num++;
	if (aead_parms.iv_len)
		tx_frag_num++;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				    rctx->assoc, aead_parms.assoc_size,
				    assoc_nents, aead_parms.iv_len, chunksize,
				    aead_parms.aad_pad_len, pad_len, incl_icv);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

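/**
 * handle_aead_resp() - Process a SPU response message for an AEAD request.
 * @rctx:  Crypto request context
 */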
static void handle_aead_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;
	unsigned int icv_offset;
	u32 result_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
	flow_log("payload_len %u\n", payload_len);

	/* only count payload */
	atomic64_add(payload_len, &iproc_priv.bytes_in);

	if (req->assoclen)
		packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
			    req->assoclen);

	/*
	 * When encrypting, copy the ICV back to the destination buffer. In
	 * decrypt case, SPU gives us back the digest, but crypto API doesn't
	 * expect ICV in dst buffer.
	 */
	result_len = req->cryptlen;
	if (rctx->is_encrypt) {
		icv_offset = req->assoclen + rctx->total_sent;
		packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
				      ctx->digestsize, icv_offset);
		result_len += ctx->digestsize;
	}

	packet_log("response data: ");
	dump_sg(req->dst, req->assoclen, result_len);

	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
	if (ctx->cipher.alg == CIPHER_ALG_AES) {
		if (ctx->cipher.mode == CIPHER_MODE_CCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
		else
			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	} else {
		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	}
}

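/**
 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request.
 * @rctx: request context
 *
 * Mailbox scatterlists are allocated for each chunk. So free them after
 * processing each chunk.
 */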
static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
{
	/* mailbox message used to tx request */
	struct brcm_message *mssg = &rctx->mb_mssg;

	kfree(mssg->spu.src);
	kfree(mssg->spu.dst);
	memset(mssg, 0, sizeof(struct brcm_message));
}

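/**
 * finish_req() - Used to invoke the complete callback from the requester when
 * a request has been handled asynchronously.
 * @rctx:  Request context
 * @err:   Indicates whether the request was successful or not
 *
 * Ensures that cleanup has been done for request.
 */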
static void finish_req(struct iproc_reqctx_s *rctx, int err)
{
	struct crypto_async_request *areq = rctx->parent;

	flow_log("%s() err:%d\n\n", __func__, err);

	/* No harm done if already called */
	spu_chunk_cleanup(rctx);

	if (areq)
		areq->complete(areq, err);
}

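/**
 * spu_rx_callback() - Callback from mailbox framework with a SPU response.
 * @cl:		mailbox client structure for SPU driver
 * @msg:	mailbox message containing SPU response
 */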
static void spu_rx_callback(struct mbox_client *cl, void *msg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct brcm_message *mssg = msg;
	struct iproc_reqctx_s *rctx;
	int err;

	rctx = mssg->ctx;
	if (unlikely(!rctx)) {
		/* This is fatal */
		pr_err("%s(): no request context", __func__);
		err = -EFAULT;
		goto cb_finish;
	}

	/* process the SPU status */
	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
	if (err != 0) {
		if (err == SPU_INVALID_ICV)
			atomic_inc(&iproc_priv.bad_icv);
		err = -EBADMSG;
		goto cb_finish;
	}

	/* Process the SPU response message */
	switch (rctx->ctx->alg->type) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		handle_skcipher_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		handle_ahash_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		handle_aead_resp(rctx);
		break;
	default:
		err = -EINVAL;
		goto cb_finish;
	}

	/*
	 * If this response does not complete the request, then send the next
	 * request chunk.
	 */
	if (rctx->total_sent < rctx->total_todo) {
		/* Deallocate anything specific to previous chunk */
		spu_chunk_cleanup(rctx);

		switch (rctx->ctx->alg->type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			err = handle_skcipher_req(rctx);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = handle_ahash_req(rctx);
			if (err == -EAGAIN)
				/*
				 * we saved data in hash carry, but tell crypto
				 * API we successfully completed request.
				 */
				err = 0;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = handle_aead_req(rctx);
			break;
		default:
			err = -EINVAL;
		}

		if (err == -EINPROGRESS)
			/* Successfully submitted request for next chunk */
			return;
	}

cb_finish:
	finish_req(rctx, err);
}

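/**
 * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
 * @req:	Crypto API request
 * @encrypt:	true if encrypting; false if decrypting
 *
 * Return: -EINPROGRESS if request accepted and result will be returned
 *			asynchronously
 *	   < 0 if an error
 */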
static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
{
	struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
	struct iproc_ctx_s *ctx =
	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int err;

	flow_log("%s() enc:%u\n", __func__, encrypt);

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->is_encrypt = encrypt;
	rctx->bd_suppress = false;
	rctx->total_todo = req->cryptlen;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;
	rctx->ctx = ctx;

	/* Initialize current position in src and dst scatterlists */
	rctx->src_sg = req->src;
	rctx->src_nents = 0;
	rctx->src_skip = 0;
	rctx->dst_sg = req->dst;
	rctx->dst_nents = 0;
	rctx->dst_skip = 0;

	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
	    ctx->cipher.mode == CIPHER_MODE_CTR ||
	    ctx->cipher.mode == CIPHER_MODE_OFB ||
	    ctx->cipher.mode == CIPHER_MODE_XTS ||
	    ctx->cipher.mode == CIPHER_MODE_GCM ||
	    ctx->cipher.mode == CIPHER_MODE_CCM) {
		rctx->iv_ctr_len =
		    crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
		memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
	} else {
		rctx->iv_ctr_len = 0;
	}

	/* Choose a SPU to process this request */
	rctx->chan_idx = select_channel();
	err = handle_skcipher_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result */
		spu_chunk_cleanup(rctx);

	return err;
}

static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->cipher_type = CIPHER_TYPE_DES;
	return 0;
}

static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->cipher_type = CIPHER_TYPE_3DES;
	return 0;
}

static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);

	if (ctx->cipher.mode == CIPHER_MODE_XTS)
		/* XTS includes two keys of equal length */
		keylen = keylen / 2;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->cipher_type = CIPHER_TYPE_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->cipher_type = CIPHER_TYPE_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->cipher_type = CIPHER_TYPE_AES256;
		break;
	default:
		return -EINVAL;
	}
	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
	return 0;
}

static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
	int i;

	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;

	ctx->enckey[0] = 0x00;	/* 0x00 */
	ctx->enckey[1] = 0x00;	/* i */
	ctx->enckey[2] = 0x00;	/* 0x00 */
	ctx->enckey[3] = 0x00;	/* j */
	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];

	ctx->cipher_type = CIPHER_TYPE_INIT;

	return 0;
}

static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
	struct spu_cipher_parms cipher_parms;
	u32 alloc_len = 0;
	int err;

	flow_log("skcipher_setkey() keylen: %d\n", keylen);
	flow_dump(" key: ", key, keylen);

	switch (ctx->cipher.alg) {
	case CIPHER_ALG_DES:
		err = des_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_3DES:
		err = threedes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_AES:
		err = aes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_RC4:
		err = rc4_setkey(cipher, key, keylen);
		break;
	default:
		pr_err("%s() Error: unknown cipher alg\n", __func__);
		err = -EINVAL;
	}
	if (err)
		return err;

	/* RC4 already populated ctx->enckey */
	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
		memcpy(ctx->enckey, key, keylen);
		ctx->enckeylen = keylen;
	}
	/* SPU needs XTS keys in the reverse order the crypto API presents */
	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
		unsigned int xts_keylen = keylen / 2;

		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
	}

	if (spu->spu_type == SPU_TYPE_SPUM)
		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
	cipher_parms.iv_buf = NULL;
	cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;

	/* Prepend SPU request message with BCM header */
	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
	ctx->spu_req_hdr_len =
		spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
					 &cipher_parms);

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);

	return 0;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);

	return skcipher_enqueue(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
	return skcipher_enqueue(req, false);
}

static int ahash_enqueue(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int err;
	const char *alg_name;

	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->ctx = ctx;
	rctx->bd_suppress = true;
	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));

	/* Initialize position in src scatterlist */
	rctx->src_sg = req->src;
	rctx->src_skip = 0;
	rctx->src_nents = 0;
	rctx->dst_sg = NULL;
	rctx->dst_skip = 0;
	rctx->dst_nents = 0;

	/* SPU2 hardware does not compute hash of zero length data */
	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		flow_log("Doing %sfinal %s zero-len hash request in software\n",
			 rctx->is_final ? "" : "non-", alg_name);
		err = do_shash((unsigned char *)alg_name, req->result,
			       NULL, 0, NULL, 0, ctx->authkey,
			       ctx->authkeylen);
		if (err < 0)
			flow_log("Hash request failed with error %d\n", err);
		return err;
	}
	/* Choose a SPU to process this request */
	rctx->chan_idx = select_channel();

	err = handle_ahash_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result */
		spu_chunk_cleanup(rctx);

	if (err == -EAGAIN)
		/*
		 * we saved data in hash carry, but tell crypto API
		 * we successfully completed request.
		 */
		err = 0;

	return err;
}

static int __ahash_init(struct ahash_request *req)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);

	flow_log("%s()\n", __func__);

	/* Initialize the context */
	rctx->hash_carry_len = 0;
	rctx->is_final = 0;

	rctx->total_todo = 0;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;

	ctx->digestsize = crypto_ahash_digestsize(tfm);
	/* If we add a hash whose digest is larger, catch it here. */
	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);

	rctx->is_sw_hmac = false;

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
							  true);

	return 0;
}

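/**
 * spu_no_incr_hash() - Determine whether incremental hashing is supported.
 * @ctx:  Crypto session context
 *
 * SPU-2 does not support incremental hashing. SPU-M also doesn't support
 * incremental hashing of AES-XCBC.
 *
 * Return: true if incremental hashing is not supported
 *         false otherwise
 */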
static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
{
	struct spu_hw *spu = &iproc_priv.spu;

	if (spu->spu_type == SPU_TYPE_SPU2)
		return true;

	if ((ctx->auth.alg == HASH_ALG_AES) &&
	    (ctx->auth.mode == HASH_MODE_XCBC))
		return true;

	/* Otherwise, incremental hashing is supported */
	return false;
}

static int ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	const char *alg_name;
	struct crypto_shash *hash;
	int ret;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		hash = crypto_alloc_shash(alg_name, 0, 0);
		if (IS_ERR(hash)) {
			ret = PTR_ERR(hash);
			goto err;
		}

		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		ctx->shash = kmalloc(sizeof(*ctx->shash) +
				     crypto_shash_descsize(hash), gfp);
		if (!ctx->shash) {
			ret = -ENOMEM;
			goto err_hash;
		}
		ctx->shash->tfm = hash;

		/* Set the key using data we already have from setkey */
		if (ctx->authkeylen > 0) {
			ret = crypto_shash_setkey(hash, ctx->authkey,
						  ctx->authkeylen);
			if (ret)
				goto err_shash;
		}

		/* Initialize hash w/ this key and other params */
		ret = crypto_shash_init(ctx->shash);
		if (ret)
			goto err_shash;
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_init(req);
	}

	return ret;

err_shash:
	kfree(ctx->shash);
err_hash:
	crypto_free_shash(hash);
err:
	return ret;
}

static int __ahash_update(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_update() nbytes:%u\n", req->nbytes);

	if (!req->nbytes)
		return 0;
	rctx->total_todo += req->nbytes;
	rctx->src_sent = 0;

	return ahash_enqueue(req);
}

static int ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		if (req->src)
			nents = sg_nents(req->src);
		else
			return -EINVAL;

		/* Copy data from req scatterlist to tmp buffer */
		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf)
			return -ENOMEM;

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			kfree(tmpbuf);
			return -EINVAL;
		}

		/* Call synchronous update */
		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
		kfree(tmpbuf);
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_update(req);
	}

	return ret;
}

static int __ahash_final(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_final() nbytes:%u\n", req->nbytes);

	rctx->is_final = 1;

	return ahash_enqueue(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int ret;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		ret = crypto_shash_final(ctx->shash, req->result);

		/* Done with hash, can deallocate it now */
		crypto_free_shash(ctx->shash->tfm);
		kfree(ctx->shash);

	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_final(req);
	}

	return ret;
}

static int __ahash_finup(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);

	rctx->total_todo += req->nbytes;
	rctx->src_sent = 0;
	rctx->is_final = 1;

	return ahash_enqueue(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		if (req->src) {
			nents = sg_nents(req->src);
		} else {
			ret = -EINVAL;
			goto ahash_finup_exit;
		}

		/* Copy data from req scatterlist to tmp buffer */
		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf) {
			ret = -ENOMEM;
			goto ahash_finup_exit;
		}

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			ret = -EINVAL;
			goto ahash_finup_free;
		}

		/* Call synchronous update */
		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
					 req->result);
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		return __ahash_finup(req);
	}
ahash_finup_free:
	kfree(tmpbuf);

ahash_finup_exit:
	/* Done with hash, can deallocate it now */
	crypto_free_shash(ctx->shash->tfm);
	kfree(ctx->shash);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	int err;

	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);

	/* whole thing at once */
	err = __ahash_init(req);
	if (!err)
		err = __ahash_finup(req);

	return err;
}

static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);

	flow_log("%s() ahash:%p key:%p keylen:%u\n",
		 __func__, ahash, key, keylen);
	flow_dump(" key: ", key, keylen);

	if (ctx->auth.alg == HASH_ALG_AES) {
		switch (keylen) {
		case AES_KEYSIZE_128:
			ctx->cipher_type = CIPHER_TYPE_AES128;
			break;
		case AES_KEYSIZE_192:
			ctx->cipher_type = CIPHER_TYPE_AES192;
			break;
		case AES_KEYSIZE_256:
			ctx->cipher_type = CIPHER_TYPE_AES256;
			break;
		default:
			pr_err("%s() Error: Invalid key length\n", __func__);
			return -EINVAL;
		}
	} else {
		pr_err("%s() Error: unknown hash alg\n", __func__);
		return -EINVAL;
	}
	memcpy(ctx->authkey, key, keylen);
	ctx->authkeylen = keylen;

	return 0;
}

static int ahash_export(struct ahash_request *req, void *out)
{
	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;

	spu_exp->total_todo = rctx->total_todo;
	spu_exp->total_sent = rctx->total_sent;
	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
	spu_exp->hash_carry_len = rctx->hash_carry_len;
	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;

	rctx->total_todo = spu_exp->total_todo;
	rctx->total_sent = spu_exp->total_sent;
	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
	rctx->hash_carry_len = spu_exp->hash_carry_len;
	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));

	return 0;
}

2378static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2379 unsigned int keylen)
2380{
2381 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2382 unsigned int blocksize =
2383 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2384 unsigned int digestsize = crypto_ahash_digestsize(ahash);
2385 unsigned int index;
2386 int rc;
2387
2388 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2389 __func__, ahash, key, keylen, blocksize, digestsize);
2390 flow_dump(" key: ", key, keylen);
2391
2392 if (keylen > blocksize) {
2393 switch (ctx->auth.alg) {
2394 case HASH_ALG_MD5:
2395 rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2396 0, NULL, 0);
2397 break;
2398 case HASH_ALG_SHA1:
2399 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2400 0, NULL, 0);
2401 break;
2402 case HASH_ALG_SHA224:
2403 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2404 0, NULL, 0);
2405 break;
2406 case HASH_ALG_SHA256:
2407 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2408 0, NULL, 0);
2409 break;
2410 case HASH_ALG_SHA384:
2411 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2412 0, NULL, 0);
2413 break;
2414 case HASH_ALG_SHA512:
2415 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2416 0, NULL, 0);
2417 break;
2418 case HASH_ALG_SHA3_224:
2419 rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2420 NULL, 0, NULL, 0);
2421 break;
2422 case HASH_ALG_SHA3_256:
2423 rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2424 NULL, 0, NULL, 0);
2425 break;
2426 case HASH_ALG_SHA3_384:
2427 rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2428 NULL, 0, NULL, 0);
2429 break;
2430 case HASH_ALG_SHA3_512:
2431 rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2432 NULL, 0, NULL, 0);
2433 break;
2434 default:
2435 pr_err("%s() Error: unknown hash alg\n", __func__);
2436 return -EINVAL;
2437 }
2438 if (rc < 0) {
2439 pr_err("%s() Error %d computing shash for %s\n",
2440 __func__, rc, hash_alg_name[ctx->auth.alg]);
2441 return rc;
2442 }
2443 ctx->authkeylen = digestsize;
2444
		flow_log("  keylen > blocksize... hashed\n");
2446 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2447 } else {
2448 memcpy(ctx->authkey, key, keylen);
2449 ctx->authkeylen = keylen;
2450 }
2451
	/*
	 * SPU-M cannot perform the full HMAC in one hardware pass here, so
	 * precompute the ipad and opad blocks from the key and run the
	 * hardware in plain hash mode. The key is folded into the pads,
	 * so the hardware key length is cleared.
	 */
2457 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2458 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2459 memset(ctx->ipad + ctx->authkeylen, 0,
2460 blocksize - ctx->authkeylen);
2461 ctx->authkeylen = 0;
2462 memcpy(ctx->opad, ctx->ipad, blocksize);
2463
2464 for (index = 0; index < blocksize; index++) {
2465 ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2466 ctx->opad[index] ^= HMAC_OPAD_VALUE;
2467 }
2468
2469 flow_dump(" ipad: ", ctx->ipad, blocksize);
2470 flow_dump(" opad: ", ctx->opad, blocksize);
2471 }
2472 ctx->digestsize = digestsize;
2473 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2474
2475 return 0;
2476}
2477
2478static int ahash_hmac_init(struct ahash_request *req)
2479{
2480 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2481 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2482 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2483 unsigned int blocksize =
2484 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2485
2486 flow_log("ahash_hmac_init()\n");
2487
2488
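	/* Start from a freshly initialized hash context */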
2489 ahash_init(req);
2490
2491 if (!spu_no_incr_hash(ctx)) {
2492
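		/* SPU can hash incrementally; handle the HMAC pads in software */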
2493 rctx->is_sw_hmac = true;
2494 ctx->auth.mode = HASH_MODE_HASH;
2495
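		/* start with a prepended ipad */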
2496 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2497 rctx->hash_carry_len = blocksize;
2498 rctx->total_todo += blocksize;
2499 }
2500
2501 return 0;
2502}
2503
2504static int ahash_hmac_update(struct ahash_request *req)
2505{
2506 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2507
2508 if (!req->nbytes)
2509 return 0;
2510
2511 return ahash_update(req);
2512}
2513
2514static int ahash_hmac_final(struct ahash_request *req)
2515{
2516 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2517
2518 return ahash_final(req);
2519}
2520
2521static int ahash_hmac_finup(struct ahash_request *req)
2522{
	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2524
2525 return ahash_finup(req);
2526}
2527
2528static int ahash_hmac_digest(struct ahash_request *req)
2529{
2530 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2531 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2532 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2533 unsigned int blocksize =
2534 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2535
2536 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2537
2538
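	/* Perform the HMAC as a single digest operation: init, then finup */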
2539 __ahash_init(req);
2540
2541 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
		/*
		 * SPU2 supports full HMAC in hardware, so no ipad/opad
		 * handling or outer hashing is needed in software. If the
		 * key was longer than the hash block size, setkey() already
		 * hashed it down to digest size.
		 */
2550 rctx->is_sw_hmac = false;
2551 ctx->auth.mode = HASH_MODE_HMAC;
2552 } else {
2553 rctx->is_sw_hmac = true;
2554 ctx->auth.mode = HASH_MODE_HASH;
2555
2556 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2557 rctx->hash_carry_len = blocksize;
2558 rctx->total_todo += blocksize;
2559 }
2560
2561 return __ahash_finup(req);
2562}
2563
/* AEAD operations */
2566static int aead_need_fallback(struct aead_request *req)
2567{
2568 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2569 struct spu_hw *spu = &iproc_priv.spu;
2570 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2571 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2572 u32 payload_len;
2573
	/*
	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
	 * and AAD are both 0 bytes long. So use fallback in this case.
	 */
2578 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2579 (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2580 (req->assoclen == 0)) {
2581 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2582 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2583 flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2584 return 1;
2585 }
2586 }
2587
2588
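	/* SPU-M hardware only supports CCM digest sizes of 8, 12, or 16 bytes */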
2589 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2590 (spu->spu_type == SPU_TYPE_SPUM) &&
2591 (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2592 (ctx->digestsize != 16)) {
2593 flow_log("%s() AES CCM needs fallback for digest size %d\n",
2594 __func__, ctx->digestsize);
2595 return 1;
2596 }
2597
	/*
	 * SPU-M on NSP does not produce a correct AES-CCM result when the
	 * AAD length is 0, so use the software fallback in that case.
	 */
2602 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2603 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2604 (req->assoclen == 0)) {
2605 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2606 __func__);
2607 return 1;
2608 }
2609
	/*
	 * RFC4106/RFC4543 can only be handled by the hardware when assoclen
	 * is 16 or 20 bytes; otherwise use the fallback.
	 */
2614 if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2615 ctx->cipher.alg == CIPHER_ALG_AES &&
2616 rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2617 req->assoclen != 16 && req->assoclen != 20) {
		flow_log("RFC4106/RFC4543 needs fallback for assoclen other than 16 or 20 bytes\n");
2620 return 1;
2621 }
2622
2623 payload_len = req->cryptlen;
2624 if (spu->spu_type == SPU_TYPE_SPUM)
2625 payload_len += req->assoclen;
2626
2627 flow_log("%s() payload len: %u\n", __func__, payload_len);
2628
2629 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2630 return 0;
2631 else
2632 return payload_len > ctx->max_payload;
2633}
2634
2635static void aead_complete(struct crypto_async_request *areq, int err)
2636{
2637 struct aead_request *req =
2638 container_of(areq, struct aead_request, base);
2639 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2640 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2641
2642 flow_log("%s() err:%d\n", __func__, err);
2643
2644 areq->tfm = crypto_aead_tfm(aead);
2645
2646 areq->complete = rctx->old_complete;
2647 areq->data = rctx->old_data;
2648
2649 areq->complete(areq, err);
2650}
2651
2652static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2653{
2654 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2655 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2656 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2657 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2658 int err;
2659 u32 req_flags;
2660
2661 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2662
2663 if (ctx->fallback_cipher) {
2664
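		/* Temporarily redirect the request to the fallback tfm */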
2665 rctx->old_tfm = tfm;
2666 aead_request_set_tfm(req, ctx->fallback_cipher);
2667
		/*
		 * Save the original completion callback and context, and
		 * substitute our own so the tfm can be restored when the
		 * fallback completes.
		 */
2671 rctx->old_complete = req->base.complete;
2672 rctx->old_data = req->base.data;
2673 req_flags = aead_request_flags(req);
2674 aead_request_set_callback(req, req_flags, aead_complete, req);
2675 err = is_encrypt ? crypto_aead_encrypt(req) :
2676 crypto_aead_decrypt(req);
2677
2678 if (err == 0) {
			/*
			 * The fallback completed synchronously (it did not
			 * return -EINPROGRESS), so restore the request
			 * state here.
			 */
2683 aead_request_set_callback(req, req_flags,
2684 rctx->old_complete, req);
2685 req->base.data = rctx->old_data;
2686 aead_request_set_tfm(req, aead);
2687 flow_log("%s() fallback completed successfully\n\n",
2688 __func__);
2689 }
2690 } else {
2691 err = -EINVAL;
2692 }
2693
2694 return err;
2695}
2696
2697static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2698{
2699 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2700 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2701 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2702 int err;
2703
2704 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2705
2706 if (req->assoclen > MAX_ASSOC_SIZE) {
		pr_err("%s() Error: associated data too long. (%u > %u bytes)\n",
		       __func__, req->assoclen, MAX_ASSOC_SIZE);
2710 return -EINVAL;
2711 }
2712
2713 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2714 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2715 rctx->parent = &req->base;
2716 rctx->is_encrypt = is_encrypt;
2717 rctx->bd_suppress = false;
2718 rctx->total_todo = req->cryptlen;
2719 rctx->src_sent = 0;
2720 rctx->total_sent = 0;
2721 rctx->total_received = 0;
2722 rctx->is_sw_hmac = false;
2723 rctx->ctx = ctx;
2724 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2725
2726
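	/* assoc data is at start of src sg */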
2727 rctx->assoc = req->src;
2728
	/*
	 * Init current position in src scatterlist to be after assoc data.
	 * src_skip is set to the buffer offset where the data begins, since
	 * the assoc data may end in the middle of a buffer.
	 */
2734 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2735 &rctx->src_skip) < 0) {
2736 pr_err("%s() Error: Unable to find start of src data\n",
2737 __func__);
2738 return -EINVAL;
2739 }
2740
2741 rctx->src_nents = 0;
2742 rctx->dst_nents = 0;
2743 if (req->dst == req->src) {
2744 rctx->dst_sg = rctx->src_sg;
2745 rctx->dst_skip = rctx->src_skip;
2746 } else {
		/*
		 * Associated data is expected at the start of the dst sg
		 * too. Find where the dst data payload begins; it may
		 * start mid-buffer.
		 */
2752 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2753 &rctx->dst_skip) < 0) {
2754 pr_err("%s() Error: Unable to find start of dst data\n",
2755 __func__);
2756 return -EINVAL;
2757 }
2758 }
2759
2760 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2761 ctx->cipher.mode == CIPHER_MODE_CTR ||
2762 ctx->cipher.mode == CIPHER_MODE_OFB ||
2763 ctx->cipher.mode == CIPHER_MODE_XTS ||
2764 ctx->cipher.mode == CIPHER_MODE_GCM) {
2765 rctx->iv_ctr_len =
2766 ctx->salt_len +
2767 crypto_aead_ivsize(crypto_aead_reqtfm(req));
2768 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2769 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2770 } else {
2771 rctx->iv_ctr_len = 0;
2772 }
2773
2774 rctx->hash_carry_len = 0;
2775
2776 flow_log(" src sg: %p\n", req->src);
2777 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2778 rctx->src_sg, rctx->src_skip);
2779 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2780 flow_log(" dst sg: %p\n", req->dst);
2781 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2782 rctx->dst_sg, rctx->dst_skip);
2783 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2784 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2785 flow_log(" authkeylen:%u\n", ctx->authkeylen);
2786 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2787
2788 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2789 flow_log(" max_payload infinite");
2790 else
2791 flow_log(" max_payload: %u\n", ctx->max_payload);
2792
2793 if (unlikely(aead_need_fallback(req)))
2794 return aead_do_fallback(req, is_encrypt);
2795
	/*
	 * Build the full IV in the message buffer: for the ESP variants,
	 * the salt is copied in at salt_offset, followed by the IV from
	 * the request.
	 */
2800 if (rctx->iv_ctr_len) {
2801 if (ctx->salt_len)
2802 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2803 ctx->salt, ctx->salt_len);
2804 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2805 req->iv,
2806 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2807 }
2808
2809 rctx->chan_idx = select_channel();
2810 err = handle_aead_req(rctx);
2811 if (err != -EINPROGRESS)
		/* synchronous result */
2813 spu_chunk_cleanup(rctx);
2814
2815 return err;
2816}
2817
2818static int aead_authenc_setkey(struct crypto_aead *cipher,
2819 const u8 *key, unsigned int keylen)
2820{
2821 struct spu_hw *spu = &iproc_priv.spu;
2822 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2823 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2824 struct crypto_authenc_keys keys;
2825 int ret;
2826
2827 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2828 keylen);
2829 flow_dump(" key: ", key, keylen);
2830
2831 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2832 if (ret)
2833 goto badkey;
2834
2835 if (keys.enckeylen > MAX_KEY_SIZE ||
2836 keys.authkeylen > MAX_KEY_SIZE)
2837 goto badkey;
2838
2839 ctx->enckeylen = keys.enckeylen;
2840 ctx->authkeylen = keys.authkeylen;
2841
2842 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2843
2844 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2845 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2846
2847 switch (ctx->alg->cipher_info.alg) {
2848 case CIPHER_ALG_DES:
2849 if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2850 return -EINVAL;
2851
2852 ctx->cipher_type = CIPHER_TYPE_DES;
2853 break;
2854 case CIPHER_ALG_3DES:
2855 if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2856 return -EINVAL;
2857
2858 ctx->cipher_type = CIPHER_TYPE_3DES;
2859 break;
2860 case CIPHER_ALG_AES:
2861 switch (ctx->enckeylen) {
2862 case AES_KEYSIZE_128:
2863 ctx->cipher_type = CIPHER_TYPE_AES128;
2864 break;
2865 case AES_KEYSIZE_192:
2866 ctx->cipher_type = CIPHER_TYPE_AES192;
2867 break;
2868 case AES_KEYSIZE_256:
2869 ctx->cipher_type = CIPHER_TYPE_AES256;
2870 break;
2871 default:
2872 goto badkey;
2873 }
2874 break;
2875 case CIPHER_ALG_RC4:
2876 ctx->cipher_type = CIPHER_TYPE_INIT;
2877 break;
2878 default:
2879 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2880 return -EINVAL;
2881 }
2882
2883 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2884 ctx->authkeylen);
2885 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2886 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2887
2888
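	/* setkey the fallback just in case we need to use it */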
2889 if (ctx->fallback_cipher) {
2890 flow_log(" running fallback setkey()\n");
2891
2892 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2893 ctx->fallback_cipher->base.crt_flags |=
2894 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2895 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2896 if (ret)
2897 flow_log(" fallback setkey() returned:%d\n", ret);
2898 }
2899
2900 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2901 ctx->enckeylen,
2902 false);
2903
2904 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2905
2906 return ret;
2907
2908badkey:
2909 ctx->enckeylen = 0;
2910 ctx->authkeylen = 0;
2911 ctx->digestsize = 0;
2912
2913 return -EINVAL;
2914}
2915
2916static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2917 const u8 *key, unsigned int keylen)
2918{
2919 struct spu_hw *spu = &iproc_priv.spu;
2920 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2921 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2922
2923 int ret = 0;
2924
2925 flow_log("%s() keylen:%u\n", __func__, keylen);
2926 flow_dump(" key: ", key, keylen);
2927
2928 if (!ctx->is_esp)
2929 ctx->digestsize = keylen;
2930
2931 ctx->enckeylen = keylen;
2932 ctx->authkeylen = 0;
2933 memcpy(ctx->enckey, key, ctx->enckeylen);
2934
2935 switch (ctx->enckeylen) {
2936 case AES_KEYSIZE_128:
2937 ctx->cipher_type = CIPHER_TYPE_AES128;
2938 break;
2939 case AES_KEYSIZE_192:
2940 ctx->cipher_type = CIPHER_TYPE_AES192;
2941 break;
2942 case AES_KEYSIZE_256:
2943 ctx->cipher_type = CIPHER_TYPE_AES256;
2944 break;
2945 default:
2946 goto badkey;
2947 }
2948
2949 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2950 ctx->authkeylen);
2951 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2952 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2953
2954
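	/* setkey the fallback just in case we need to use it */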
2955 if (ctx->fallback_cipher) {
2956 flow_log(" running fallback setkey()\n");
2957
2958 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2959 ctx->fallback_cipher->base.crt_flags |=
2960 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2961 ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2962 keylen + ctx->salt_len);
2963 if (ret)
2964 flow_log(" fallback setkey() returned:%d\n", ret);
2965 }
2966
2967 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2968 ctx->enckeylen,
2969 false);
2970
2971 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2972
2973 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2974 ctx->authkeylen);
2975
2976 return ret;
2977
2978badkey:
2979 ctx->enckeylen = 0;
2980 ctx->authkeylen = 0;
2981 ctx->digestsize = 0;
2982
2983 return -EINVAL;
2984}
2985
/**
 * aead_gcm_esp_setkey() - setkey() operation for the ESP variant of GCM AES.
 * @cipher: AEAD structure
 * @key:    Key followed by 4 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts the salt from the key and stores it to be prepended to the IV on
 * each request. The digest is always 16 bytes.
 *
 * Return: Value from the generic GCM/CCM setkey.
 */
2997static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
2998 const u8 *key, unsigned int keylen)
2999{
3000 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3001
3002 flow_log("%s\n", __func__);
3003 ctx->salt_len = GCM_ESP_SALT_SIZE;
3004 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3005 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3006 keylen -= GCM_ESP_SALT_SIZE;
3007 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3008 ctx->is_esp = true;
3009 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3010
3011 return aead_gcm_ccm_setkey(cipher, key, keylen);
3012}
3013
/**
 * rfc4543_gcm_esp_setkey() - setkey() operation for the RFC4543 (GMAC)
 * variant of GCM used with ESP.
 * @cipher: AEAD structure
 * @key:    Key followed by 4 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts the salt from the key and stores it to be prepended to the IV on
 * each request. The digest is always 16 bytes.
 *
 * Return: Value from the generic GCM/CCM setkey.
 */
3025static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3026 const u8 *key, unsigned int keylen)
3027{
3028 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3029
3030 flow_log("%s\n", __func__);
3031 ctx->salt_len = GCM_ESP_SALT_SIZE;
3032 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3033 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3034 keylen -= GCM_ESP_SALT_SIZE;
3035 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3036 ctx->is_esp = true;
3037 ctx->is_rfc4543 = true;
3038 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3039
3040 return aead_gcm_ccm_setkey(cipher, key, keylen);
3041}
3042
/**
 * aead_ccm_esp_setkey() - setkey() operation for the ESP variant of CCM AES.
 * @cipher: AEAD structure
 * @key:    Key followed by the salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts the salt from the key and stores it to be prepended to the IV on
 * each request.
 *
 * Return: Value from the generic GCM/CCM setkey.
 */
3054static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3055 const u8 *key, unsigned int keylen)
3056{
3057 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3058
3059 flow_log("%s\n", __func__);
3060 ctx->salt_len = CCM_ESP_SALT_SIZE;
3061 ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3062 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3063 keylen -= CCM_ESP_SALT_SIZE;
3064 ctx->is_esp = true;
3065 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3066
3067 return aead_gcm_ccm_setkey(cipher, key, keylen);
3068}
3069
3070static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3071{
3072 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3073 int ret = 0;
3074
3075 flow_log("%s() authkeylen:%u authsize:%u\n",
3076 __func__, ctx->authkeylen, authsize);
3077
3078 ctx->digestsize = authsize;
3079
3080
3081 if (ctx->fallback_cipher) {
3082 flow_log(" running fallback setauth()\n");
3083
3084 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3085 if (ret)
3086 flow_log(" fallback setauth() returned:%d\n", ret);
3087 }
3088
3089 return ret;
3090}
3091
3092static int aead_encrypt(struct aead_request *req)
3093{
	flow_log("%s() cryptlen:%u (0x%08x)\n", __func__, req->cryptlen,
		 req->cryptlen);
3096 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3097 flow_log(" assoc_len:%u\n", req->assoclen);
3098
3099 return aead_enqueue(req, true);
3100}
3101
3102static int aead_decrypt(struct aead_request *req)
3103{
3104 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3105 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3106 flow_log(" assoc_len:%u\n", req->assoclen);
3107
3108 return aead_enqueue(req, false);
3109}
3110
/* Crypto algorithms this driver registers with the kernel crypto API */
3113static struct iproc_alg_s driver_algs[] = {
3114 {
3115 .type = CRYPTO_ALG_TYPE_AEAD,
3116 .alg.aead = {
3117 .base = {
3118 .cra_name = "gcm(aes)",
3119 .cra_driver_name = "gcm-aes-iproc",
3120 .cra_blocksize = AES_BLOCK_SIZE,
3121 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3122 },
3123 .setkey = aead_gcm_ccm_setkey,
3124 .ivsize = GCM_AES_IV_SIZE,
3125 .maxauthsize = AES_BLOCK_SIZE,
3126 },
3127 .cipher_info = {
3128 .alg = CIPHER_ALG_AES,
3129 .mode = CIPHER_MODE_GCM,
3130 },
3131 .auth_info = {
3132 .alg = HASH_ALG_AES,
3133 .mode = HASH_MODE_GCM,
3134 },
3135 .auth_first = 0,
3136 },
3137 {
3138 .type = CRYPTO_ALG_TYPE_AEAD,
3139 .alg.aead = {
3140 .base = {
3141 .cra_name = "ccm(aes)",
3142 .cra_driver_name = "ccm-aes-iproc",
3143 .cra_blocksize = AES_BLOCK_SIZE,
3144 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3145 },
3146 .setkey = aead_gcm_ccm_setkey,
3147 .ivsize = CCM_AES_IV_SIZE,
3148 .maxauthsize = AES_BLOCK_SIZE,
3149 },
3150 .cipher_info = {
3151 .alg = CIPHER_ALG_AES,
3152 .mode = CIPHER_MODE_CCM,
3153 },
3154 .auth_info = {
3155 .alg = HASH_ALG_AES,
3156 .mode = HASH_MODE_CCM,
3157 },
3158 .auth_first = 0,
3159 },
3160 {
3161 .type = CRYPTO_ALG_TYPE_AEAD,
3162 .alg.aead = {
3163 .base = {
3164 .cra_name = "rfc4106(gcm(aes))",
3165 .cra_driver_name = "gcm-aes-esp-iproc",
3166 .cra_blocksize = AES_BLOCK_SIZE,
3167 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3168 },
3169 .setkey = aead_gcm_esp_setkey,
3170 .ivsize = GCM_RFC4106_IV_SIZE,
3171 .maxauthsize = AES_BLOCK_SIZE,
3172 },
3173 .cipher_info = {
3174 .alg = CIPHER_ALG_AES,
3175 .mode = CIPHER_MODE_GCM,
3176 },
3177 .auth_info = {
3178 .alg = HASH_ALG_AES,
3179 .mode = HASH_MODE_GCM,
3180 },
3181 .auth_first = 0,
3182 },
3183 {
3184 .type = CRYPTO_ALG_TYPE_AEAD,
3185 .alg.aead = {
3186 .base = {
3187 .cra_name = "rfc4309(ccm(aes))",
3188 .cra_driver_name = "ccm-aes-esp-iproc",
3189 .cra_blocksize = AES_BLOCK_SIZE,
3190 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3191 },
3192 .setkey = aead_ccm_esp_setkey,
3193 .ivsize = CCM_AES_IV_SIZE,
3194 .maxauthsize = AES_BLOCK_SIZE,
3195 },
3196 .cipher_info = {
3197 .alg = CIPHER_ALG_AES,
3198 .mode = CIPHER_MODE_CCM,
3199 },
3200 .auth_info = {
3201 .alg = HASH_ALG_AES,
3202 .mode = HASH_MODE_CCM,
3203 },
3204 .auth_first = 0,
3205 },
3206 {
3207 .type = CRYPTO_ALG_TYPE_AEAD,
3208 .alg.aead = {
3209 .base = {
3210 .cra_name = "rfc4543(gcm(aes))",
3211 .cra_driver_name = "gmac-aes-esp-iproc",
3212 .cra_blocksize = AES_BLOCK_SIZE,
3213 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3214 },
3215 .setkey = rfc4543_gcm_esp_setkey,
3216 .ivsize = GCM_RFC4106_IV_SIZE,
3217 .maxauthsize = AES_BLOCK_SIZE,
3218 },
3219 .cipher_info = {
3220 .alg = CIPHER_ALG_AES,
3221 .mode = CIPHER_MODE_GCM,
3222 },
3223 .auth_info = {
3224 .alg = HASH_ALG_AES,
3225 .mode = HASH_MODE_GCM,
3226 },
3227 .auth_first = 0,
3228 },
3229 {
3230 .type = CRYPTO_ALG_TYPE_AEAD,
3231 .alg.aead = {
3232 .base = {
3233 .cra_name = "authenc(hmac(md5),cbc(aes))",
3234 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3235 .cra_blocksize = AES_BLOCK_SIZE,
3236 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3237 CRYPTO_ALG_ASYNC |
3238 CRYPTO_ALG_ALLOCATES_MEMORY
3239 },
3240 .setkey = aead_authenc_setkey,
3241 .ivsize = AES_BLOCK_SIZE,
3242 .maxauthsize = MD5_DIGEST_SIZE,
3243 },
3244 .cipher_info = {
3245 .alg = CIPHER_ALG_AES,
3246 .mode = CIPHER_MODE_CBC,
3247 },
3248 .auth_info = {
3249 .alg = HASH_ALG_MD5,
3250 .mode = HASH_MODE_HMAC,
3251 },
3252 .auth_first = 0,
3253 },
3254 {
3255 .type = CRYPTO_ALG_TYPE_AEAD,
3256 .alg.aead = {
3257 .base = {
3258 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3259 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3260 .cra_blocksize = AES_BLOCK_SIZE,
3261 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3262 CRYPTO_ALG_ASYNC |
3263 CRYPTO_ALG_ALLOCATES_MEMORY
3264 },
3265 .setkey = aead_authenc_setkey,
3266 .ivsize = AES_BLOCK_SIZE,
3267 .maxauthsize = SHA1_DIGEST_SIZE,
3268 },
3269 .cipher_info = {
3270 .alg = CIPHER_ALG_AES,
3271 .mode = CIPHER_MODE_CBC,
3272 },
3273 .auth_info = {
3274 .alg = HASH_ALG_SHA1,
3275 .mode = HASH_MODE_HMAC,
3276 },
3277 .auth_first = 0,
3278 },
3279 {
3280 .type = CRYPTO_ALG_TYPE_AEAD,
3281 .alg.aead = {
3282 .base = {
3283 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3284 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3285 .cra_blocksize = AES_BLOCK_SIZE,
3286 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3287 CRYPTO_ALG_ASYNC |
3288 CRYPTO_ALG_ALLOCATES_MEMORY
3289 },
3290 .setkey = aead_authenc_setkey,
3291 .ivsize = AES_BLOCK_SIZE,
3292 .maxauthsize = SHA256_DIGEST_SIZE,
3293 },
3294 .cipher_info = {
3295 .alg = CIPHER_ALG_AES,
3296 .mode = CIPHER_MODE_CBC,
3297 },
3298 .auth_info = {
3299 .alg = HASH_ALG_SHA256,
3300 .mode = HASH_MODE_HMAC,
3301 },
3302 .auth_first = 0,
3303 },
3304 {
3305 .type = CRYPTO_ALG_TYPE_AEAD,
3306 .alg.aead = {
3307 .base = {
3308 .cra_name = "authenc(hmac(md5),cbc(des))",
3309 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3310 .cra_blocksize = DES_BLOCK_SIZE,
3311 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3312 CRYPTO_ALG_ASYNC |
3313 CRYPTO_ALG_ALLOCATES_MEMORY
3314 },
3315 .setkey = aead_authenc_setkey,
3316 .ivsize = DES_BLOCK_SIZE,
3317 .maxauthsize = MD5_DIGEST_SIZE,
3318 },
3319 .cipher_info = {
3320 .alg = CIPHER_ALG_DES,
3321 .mode = CIPHER_MODE_CBC,
3322 },
3323 .auth_info = {
3324 .alg = HASH_ALG_MD5,
3325 .mode = HASH_MODE_HMAC,
3326 },
3327 .auth_first = 0,
3328 },
3329 {
3330 .type = CRYPTO_ALG_TYPE_AEAD,
3331 .alg.aead = {
3332 .base = {
3333 .cra_name = "authenc(hmac(sha1),cbc(des))",
3334 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3335 .cra_blocksize = DES_BLOCK_SIZE,
3336 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3337 CRYPTO_ALG_ASYNC |
3338 CRYPTO_ALG_ALLOCATES_MEMORY
3339 },
3340 .setkey = aead_authenc_setkey,
3341 .ivsize = DES_BLOCK_SIZE,
3342 .maxauthsize = SHA1_DIGEST_SIZE,
3343 },
3344 .cipher_info = {
3345 .alg = CIPHER_ALG_DES,
3346 .mode = CIPHER_MODE_CBC,
3347 },
3348 .auth_info = {
3349 .alg = HASH_ALG_SHA1,
3350 .mode = HASH_MODE_HMAC,
3351 },
3352 .auth_first = 0,
3353 },
3354 {
3355 .type = CRYPTO_ALG_TYPE_AEAD,
3356 .alg.aead = {
3357 .base = {
3358 .cra_name = "authenc(hmac(sha224),cbc(des))",
3359 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3360 .cra_blocksize = DES_BLOCK_SIZE,
3361 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3362 CRYPTO_ALG_ASYNC |
3363 CRYPTO_ALG_ALLOCATES_MEMORY
3364 },
3365 .setkey = aead_authenc_setkey,
3366 .ivsize = DES_BLOCK_SIZE,
3367 .maxauthsize = SHA224_DIGEST_SIZE,
3368 },
3369 .cipher_info = {
3370 .alg = CIPHER_ALG_DES,
3371 .mode = CIPHER_MODE_CBC,
3372 },
3373 .auth_info = {
3374 .alg = HASH_ALG_SHA224,
3375 .mode = HASH_MODE_HMAC,
3376 },
3377 .auth_first = 0,
3378 },
3379 {
3380 .type = CRYPTO_ALG_TYPE_AEAD,
3381 .alg.aead = {
3382 .base = {
3383 .cra_name = "authenc(hmac(sha256),cbc(des))",
3384 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3385 .cra_blocksize = DES_BLOCK_SIZE,
3386 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3387 CRYPTO_ALG_ASYNC |
3388 CRYPTO_ALG_ALLOCATES_MEMORY
3389 },
3390 .setkey = aead_authenc_setkey,
3391 .ivsize = DES_BLOCK_SIZE,
3392 .maxauthsize = SHA256_DIGEST_SIZE,
3393 },
3394 .cipher_info = {
3395 .alg = CIPHER_ALG_DES,
3396 .mode = CIPHER_MODE_CBC,
3397 },
3398 .auth_info = {
3399 .alg = HASH_ALG_SHA256,
3400 .mode = HASH_MODE_HMAC,
3401 },
3402 .auth_first = 0,
3403 },
3404 {
3405 .type = CRYPTO_ALG_TYPE_AEAD,
3406 .alg.aead = {
3407 .base = {
3408 .cra_name = "authenc(hmac(sha384),cbc(des))",
3409 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3410 .cra_blocksize = DES_BLOCK_SIZE,
3411 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3412 CRYPTO_ALG_ASYNC |
3413 CRYPTO_ALG_ALLOCATES_MEMORY
3414 },
3415 .setkey = aead_authenc_setkey,
3416 .ivsize = DES_BLOCK_SIZE,
3417 .maxauthsize = SHA384_DIGEST_SIZE,
3418 },
3419 .cipher_info = {
3420 .alg = CIPHER_ALG_DES,
3421 .mode = CIPHER_MODE_CBC,
3422 },
3423 .auth_info = {
3424 .alg = HASH_ALG_SHA384,
3425 .mode = HASH_MODE_HMAC,
3426 },
3427 .auth_first = 0,
3428 },
3429 {
3430 .type = CRYPTO_ALG_TYPE_AEAD,
3431 .alg.aead = {
3432 .base = {
3433 .cra_name = "authenc(hmac(sha512),cbc(des))",
3434 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3435 .cra_blocksize = DES_BLOCK_SIZE,
3436 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3437 CRYPTO_ALG_ASYNC |
3438 CRYPTO_ALG_ALLOCATES_MEMORY
3439 },
3440 .setkey = aead_authenc_setkey,
3441 .ivsize = DES_BLOCK_SIZE,
3442 .maxauthsize = SHA512_DIGEST_SIZE,
3443 },
3444 .cipher_info = {
3445 .alg = CIPHER_ALG_DES,
3446 .mode = CIPHER_MODE_CBC,
3447 },
3448 .auth_info = {
3449 .alg = HASH_ALG_SHA512,
3450 .mode = HASH_MODE_HMAC,
3451 },
3452 .auth_first = 0,
3453 },
3454 {
3455 .type = CRYPTO_ALG_TYPE_AEAD,
3456 .alg.aead = {
3457 .base = {
3458 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3459 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3460 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3461 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3462 CRYPTO_ALG_ASYNC |
3463 CRYPTO_ALG_ALLOCATES_MEMORY
3464 },
3465 .setkey = aead_authenc_setkey,
3466 .ivsize = DES3_EDE_BLOCK_SIZE,
3467 .maxauthsize = MD5_DIGEST_SIZE,
3468 },
3469 .cipher_info = {
3470 .alg = CIPHER_ALG_3DES,
3471 .mode = CIPHER_MODE_CBC,
3472 },
3473 .auth_info = {
3474 .alg = HASH_ALG_MD5,
3475 .mode = HASH_MODE_HMAC,
3476 },
3477 .auth_first = 0,
3478 },
3479 {
3480 .type = CRYPTO_ALG_TYPE_AEAD,
3481 .alg.aead = {
3482 .base = {
3483 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3484 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3485 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3486 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3487 CRYPTO_ALG_ASYNC |
3488 CRYPTO_ALG_ALLOCATES_MEMORY
3489 },
3490 .setkey = aead_authenc_setkey,
3491 .ivsize = DES3_EDE_BLOCK_SIZE,
3492 .maxauthsize = SHA1_DIGEST_SIZE,
3493 },
3494 .cipher_info = {
3495 .alg = CIPHER_ALG_3DES,
3496 .mode = CIPHER_MODE_CBC,
3497 },
3498 .auth_info = {
3499 .alg = HASH_ALG_SHA1,
3500 .mode = HASH_MODE_HMAC,
3501 },
3502 .auth_first = 0,
3503 },
3504 {
3505 .type = CRYPTO_ALG_TYPE_AEAD,
3506 .alg.aead = {
3507 .base = {
3508 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3509 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3510 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3511 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3512 CRYPTO_ALG_ASYNC |
3513 CRYPTO_ALG_ALLOCATES_MEMORY
3514 },
3515 .setkey = aead_authenc_setkey,
3516 .ivsize = DES3_EDE_BLOCK_SIZE,
3517 .maxauthsize = SHA224_DIGEST_SIZE,
3518 },
3519 .cipher_info = {
3520 .alg = CIPHER_ALG_3DES,
3521 .mode = CIPHER_MODE_CBC,
3522 },
3523 .auth_info = {
3524 .alg = HASH_ALG_SHA224,
3525 .mode = HASH_MODE_HMAC,
3526 },
3527 .auth_first = 0,
3528 },
3529 {
3530 .type = CRYPTO_ALG_TYPE_AEAD,
3531 .alg.aead = {
3532 .base = {
3533 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3534 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3535 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3536 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3537 CRYPTO_ALG_ASYNC |
3538 CRYPTO_ALG_ALLOCATES_MEMORY
3539 },
3540 .setkey = aead_authenc_setkey,
3541 .ivsize = DES3_EDE_BLOCK_SIZE,
3542 .maxauthsize = SHA256_DIGEST_SIZE,
3543 },
3544 .cipher_info = {
3545 .alg = CIPHER_ALG_3DES,
3546 .mode = CIPHER_MODE_CBC,
3547 },
3548 .auth_info = {
3549 .alg = HASH_ALG_SHA256,
3550 .mode = HASH_MODE_HMAC,
3551 },
3552 .auth_first = 0,
3553 },
3554 {
3555 .type = CRYPTO_ALG_TYPE_AEAD,
3556 .alg.aead = {
3557 .base = {
3558 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3559 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3560 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3561 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3562 CRYPTO_ALG_ASYNC |
3563 CRYPTO_ALG_ALLOCATES_MEMORY
3564 },
3565 .setkey = aead_authenc_setkey,
3566 .ivsize = DES3_EDE_BLOCK_SIZE,
3567 .maxauthsize = SHA384_DIGEST_SIZE,
3568 },
3569 .cipher_info = {
3570 .alg = CIPHER_ALG_3DES,
3571 .mode = CIPHER_MODE_CBC,
3572 },
3573 .auth_info = {
3574 .alg = HASH_ALG_SHA384,
3575 .mode = HASH_MODE_HMAC,
3576 },
3577 .auth_first = 0,
3578 },
3579 {
3580 .type = CRYPTO_ALG_TYPE_AEAD,
3581 .alg.aead = {
3582 .base = {
3583 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3584 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3585 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3586 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3587 CRYPTO_ALG_ASYNC |
3588 CRYPTO_ALG_ALLOCATES_MEMORY
3589 },
3590 .setkey = aead_authenc_setkey,
3591 .ivsize = DES3_EDE_BLOCK_SIZE,
3592 .maxauthsize = SHA512_DIGEST_SIZE,
3593 },
3594 .cipher_info = {
3595 .alg = CIPHER_ALG_3DES,
3596 .mode = CIPHER_MODE_CBC,
3597 },
3598 .auth_info = {
3599 .alg = HASH_ALG_SHA512,
3600 .mode = HASH_MODE_HMAC,
3601 },
3602 .auth_first = 0,
3603 },

/* SKCIPHER algorithms. */
3606 {
3607 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3608 .alg.skcipher = {
3609 .base.cra_name = "ecb(arc4)",
3610 .base.cra_driver_name = "ecb-arc4-iproc",
3611 .base.cra_blocksize = ARC4_BLOCK_SIZE,
3612 .min_keysize = ARC4_MIN_KEY_SIZE,
3613 .max_keysize = ARC4_MAX_KEY_SIZE,
3614 .ivsize = 0,
3615 },
3616 .cipher_info = {
3617 .alg = CIPHER_ALG_RC4,
3618 .mode = CIPHER_MODE_NONE,
3619 },
3620 .auth_info = {
3621 .alg = HASH_ALG_NONE,
3622 .mode = HASH_MODE_NONE,
3623 },
3624 },
3625 {
3626 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3627 .alg.skcipher = {
3628 .base.cra_name = "ofb(des)",
3629 .base.cra_driver_name = "ofb-des-iproc",
3630 .base.cra_blocksize = DES_BLOCK_SIZE,
3631 .min_keysize = DES_KEY_SIZE,
3632 .max_keysize = DES_KEY_SIZE,
3633 .ivsize = DES_BLOCK_SIZE,
3634 },
3635 .cipher_info = {
3636 .alg = CIPHER_ALG_DES,
3637 .mode = CIPHER_MODE_OFB,
3638 },
3639 .auth_info = {
3640 .alg = HASH_ALG_NONE,
3641 .mode = HASH_MODE_NONE,
3642 },
3643 },
3644 {
3645 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3646 .alg.skcipher = {
3647 .base.cra_name = "cbc(des)",
3648 .base.cra_driver_name = "cbc-des-iproc",
3649 .base.cra_blocksize = DES_BLOCK_SIZE,
3650 .min_keysize = DES_KEY_SIZE,
3651 .max_keysize = DES_KEY_SIZE,
3652 .ivsize = DES_BLOCK_SIZE,
3653 },
3654 .cipher_info = {
3655 .alg = CIPHER_ALG_DES,
3656 .mode = CIPHER_MODE_CBC,
3657 },
3658 .auth_info = {
3659 .alg = HASH_ALG_NONE,
3660 .mode = HASH_MODE_NONE,
3661 },
3662 },
3663 {
3664 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3665 .alg.skcipher = {
3666 .base.cra_name = "ecb(des)",
3667 .base.cra_driver_name = "ecb-des-iproc",
3668 .base.cra_blocksize = DES_BLOCK_SIZE,
3669 .min_keysize = DES_KEY_SIZE,
3670 .max_keysize = DES_KEY_SIZE,
3671 .ivsize = 0,
3672 },
3673 .cipher_info = {
3674 .alg = CIPHER_ALG_DES,
3675 .mode = CIPHER_MODE_ECB,
3676 },
3677 .auth_info = {
3678 .alg = HASH_ALG_NONE,
3679 .mode = HASH_MODE_NONE,
3680 },
3681 },
3682 {
3683 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3684 .alg.skcipher = {
3685 .base.cra_name = "ofb(des3_ede)",
3686 .base.cra_driver_name = "ofb-des3-iproc",
3687 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3688 .min_keysize = DES3_EDE_KEY_SIZE,
3689 .max_keysize = DES3_EDE_KEY_SIZE,
3690 .ivsize = DES3_EDE_BLOCK_SIZE,
3691 },
3692 .cipher_info = {
3693 .alg = CIPHER_ALG_3DES,
3694 .mode = CIPHER_MODE_OFB,
3695 },
3696 .auth_info = {
3697 .alg = HASH_ALG_NONE,
3698 .mode = HASH_MODE_NONE,
3699 },
3700 },
3701 {
3702 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3703 .alg.skcipher = {
3704 .base.cra_name = "cbc(des3_ede)",
3705 .base.cra_driver_name = "cbc-des3-iproc",
3706 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3707 .min_keysize = DES3_EDE_KEY_SIZE,
3708 .max_keysize = DES3_EDE_KEY_SIZE,
3709 .ivsize = DES3_EDE_BLOCK_SIZE,
3710 },
3711 .cipher_info = {
3712 .alg = CIPHER_ALG_3DES,
3713 .mode = CIPHER_MODE_CBC,
3714 },
3715 .auth_info = {
3716 .alg = HASH_ALG_NONE,
3717 .mode = HASH_MODE_NONE,
3718 },
3719 },
3720 {
3721 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3722 .alg.skcipher = {
3723 .base.cra_name = "ecb(des3_ede)",
3724 .base.cra_driver_name = "ecb-des3-iproc",
3725 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3726 .min_keysize = DES3_EDE_KEY_SIZE,
3727 .max_keysize = DES3_EDE_KEY_SIZE,
3728 .ivsize = 0,
3729 },
3730 .cipher_info = {
3731 .alg = CIPHER_ALG_3DES,
3732 .mode = CIPHER_MODE_ECB,
3733 },
3734 .auth_info = {
3735 .alg = HASH_ALG_NONE,
3736 .mode = HASH_MODE_NONE,
3737 },
3738 },
3739 {
3740 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3741 .alg.skcipher = {
3742 .base.cra_name = "ofb(aes)",
3743 .base.cra_driver_name = "ofb-aes-iproc",
3744 .base.cra_blocksize = AES_BLOCK_SIZE,
3745 .min_keysize = AES_MIN_KEY_SIZE,
3746 .max_keysize = AES_MAX_KEY_SIZE,
3747 .ivsize = AES_BLOCK_SIZE,
3748 },
3749 .cipher_info = {
3750 .alg = CIPHER_ALG_AES,
3751 .mode = CIPHER_MODE_OFB,
3752 },
3753 .auth_info = {
3754 .alg = HASH_ALG_NONE,
3755 .mode = HASH_MODE_NONE,
3756 },
3757 },
3758 {
3759 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3760 .alg.skcipher = {
3761 .base.cra_name = "cbc(aes)",
3762 .base.cra_driver_name = "cbc-aes-iproc",
3763 .base.cra_blocksize = AES_BLOCK_SIZE,
3764 .min_keysize = AES_MIN_KEY_SIZE,
3765 .max_keysize = AES_MAX_KEY_SIZE,
3766 .ivsize = AES_BLOCK_SIZE,
3767 },
3768 .cipher_info = {
3769 .alg = CIPHER_ALG_AES,
3770 .mode = CIPHER_MODE_CBC,
3771 },
3772 .auth_info = {
3773 .alg = HASH_ALG_NONE,
3774 .mode = HASH_MODE_NONE,
3775 },
3776 },
3777 {
3778 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3779 .alg.skcipher = {
3780 .base.cra_name = "ecb(aes)",
3781 .base.cra_driver_name = "ecb-aes-iproc",
3782 .base.cra_blocksize = AES_BLOCK_SIZE,
3783 .min_keysize = AES_MIN_KEY_SIZE,
3784 .max_keysize = AES_MAX_KEY_SIZE,
3785 .ivsize = 0,
3786 },
3787 .cipher_info = {
3788 .alg = CIPHER_ALG_AES,
3789 .mode = CIPHER_MODE_ECB,
3790 },
3791 .auth_info = {
3792 .alg = HASH_ALG_NONE,
3793 .mode = HASH_MODE_NONE,
3794 },
3795 },
3796 {
3797 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3798 .alg.skcipher = {
3799 .base.cra_name = "ctr(aes)",
3800 .base.cra_driver_name = "ctr-aes-iproc",
3801 .base.cra_blocksize = AES_BLOCK_SIZE,
3802 .min_keysize = AES_MIN_KEY_SIZE,
3803 .max_keysize = AES_MAX_KEY_SIZE,
3804 .ivsize = AES_BLOCK_SIZE,
3805 },
3806 .cipher_info = {
3807 .alg = CIPHER_ALG_AES,
3808 .mode = CIPHER_MODE_CTR,
3809 },
3810 .auth_info = {
3811 .alg = HASH_ALG_NONE,
3812 .mode = HASH_MODE_NONE,
3813 },
3814 },
	{
3816 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3817 .alg.skcipher = {
3818 .base.cra_name = "xts(aes)",
3819 .base.cra_driver_name = "xts-aes-iproc",
3820 .base.cra_blocksize = AES_BLOCK_SIZE,
3821 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3822 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3823 .ivsize = AES_BLOCK_SIZE,
3824 },
3825 .cipher_info = {
3826 .alg = CIPHER_ALG_AES,
3827 .mode = CIPHER_MODE_XTS,
3828 },
3829 .auth_info = {
3830 .alg = HASH_ALG_NONE,
3831 .mode = HASH_MODE_NONE,
3832 },
3833 },

/* AHASH algorithms. */
3836 {
3837 .type = CRYPTO_ALG_TYPE_AHASH,
3838 .alg.hash = {
3839 .halg.digestsize = MD5_DIGEST_SIZE,
3840 .halg.base = {
3841 .cra_name = "md5",
3842 .cra_driver_name = "md5-iproc",
3843 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3844 .cra_flags = CRYPTO_ALG_ASYNC |
3845 CRYPTO_ALG_ALLOCATES_MEMORY,
3846 }
3847 },
3848 .cipher_info = {
3849 .alg = CIPHER_ALG_NONE,
3850 .mode = CIPHER_MODE_NONE,
3851 },
3852 .auth_info = {
3853 .alg = HASH_ALG_MD5,
3854 .mode = HASH_MODE_HASH,
3855 },
3856 },
3857 {
3858 .type = CRYPTO_ALG_TYPE_AHASH,
3859 .alg.hash = {
3860 .halg.digestsize = MD5_DIGEST_SIZE,
3861 .halg.base = {
3862 .cra_name = "hmac(md5)",
3863 .cra_driver_name = "hmac-md5-iproc",
3864 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3865 }
3866 },
3867 .cipher_info = {
3868 .alg = CIPHER_ALG_NONE,
3869 .mode = CIPHER_MODE_NONE,
3870 },
3871 .auth_info = {
3872 .alg = HASH_ALG_MD5,
3873 .mode = HASH_MODE_HMAC,
3874 },
3875 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3877 .alg.hash = {
3878 .halg.digestsize = SHA1_DIGEST_SIZE,
3879 .halg.base = {
3880 .cra_name = "sha1",
3881 .cra_driver_name = "sha1-iproc",
3882 .cra_blocksize = SHA1_BLOCK_SIZE,
3883 }
3884 },
3885 .cipher_info = {
3886 .alg = CIPHER_ALG_NONE,
3887 .mode = CIPHER_MODE_NONE,
3888 },
3889 .auth_info = {
3890 .alg = HASH_ALG_SHA1,
3891 .mode = HASH_MODE_HASH,
3892 },
3893 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3895 .alg.hash = {
3896 .halg.digestsize = SHA1_DIGEST_SIZE,
3897 .halg.base = {
3898 .cra_name = "hmac(sha1)",
3899 .cra_driver_name = "hmac-sha1-iproc",
3900 .cra_blocksize = SHA1_BLOCK_SIZE,
3901 }
3902 },
3903 .cipher_info = {
3904 .alg = CIPHER_ALG_NONE,
3905 .mode = CIPHER_MODE_NONE,
3906 },
3907 .auth_info = {
3908 .alg = HASH_ALG_SHA1,
3909 .mode = HASH_MODE_HMAC,
3910 },
3911 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3913 .alg.hash = {
3914 .halg.digestsize = SHA224_DIGEST_SIZE,
3915 .halg.base = {
3916 .cra_name = "sha224",
3917 .cra_driver_name = "sha224-iproc",
3918 .cra_blocksize = SHA224_BLOCK_SIZE,
3919 }
3920 },
3921 .cipher_info = {
3922 .alg = CIPHER_ALG_NONE,
3923 .mode = CIPHER_MODE_NONE,
3924 },
3925 .auth_info = {
3926 .alg = HASH_ALG_SHA224,
3927 .mode = HASH_MODE_HASH,
3928 },
3929 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3931 .alg.hash = {
3932 .halg.digestsize = SHA224_DIGEST_SIZE,
3933 .halg.base = {
3934 .cra_name = "hmac(sha224)",
3935 .cra_driver_name = "hmac-sha224-iproc",
3936 .cra_blocksize = SHA224_BLOCK_SIZE,
3937 }
3938 },
3939 .cipher_info = {
3940 .alg = CIPHER_ALG_NONE,
3941 .mode = CIPHER_MODE_NONE,
3942 },
3943 .auth_info = {
3944 .alg = HASH_ALG_SHA224,
3945 .mode = HASH_MODE_HMAC,
3946 },
3947 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3949 .alg.hash = {
3950 .halg.digestsize = SHA256_DIGEST_SIZE,
3951 .halg.base = {
3952 .cra_name = "sha256",
3953 .cra_driver_name = "sha256-iproc",
3954 .cra_blocksize = SHA256_BLOCK_SIZE,
3955 }
3956 },
3957 .cipher_info = {
3958 .alg = CIPHER_ALG_NONE,
3959 .mode = CIPHER_MODE_NONE,
3960 },
3961 .auth_info = {
3962 .alg = HASH_ALG_SHA256,
3963 .mode = HASH_MODE_HASH,
3964 },
3965 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3967 .alg.hash = {
3968 .halg.digestsize = SHA256_DIGEST_SIZE,
3969 .halg.base = {
3970 .cra_name = "hmac(sha256)",
3971 .cra_driver_name = "hmac-sha256-iproc",
3972 .cra_blocksize = SHA256_BLOCK_SIZE,
3973 }
3974 },
3975 .cipher_info = {
3976 .alg = CIPHER_ALG_NONE,
3977 .mode = CIPHER_MODE_NONE,
3978 },
3979 .auth_info = {
3980 .alg = HASH_ALG_SHA256,
3981 .mode = HASH_MODE_HMAC,
3982 },
3983 },
3984 {
3985 .type = CRYPTO_ALG_TYPE_AHASH,
3986 .alg.hash = {
3987 .halg.digestsize = SHA384_DIGEST_SIZE,
3988 .halg.base = {
3989 .cra_name = "sha384",
3990 .cra_driver_name = "sha384-iproc",
3991 .cra_blocksize = SHA384_BLOCK_SIZE,
3992 }
3993 },
3994 .cipher_info = {
3995 .alg = CIPHER_ALG_NONE,
3996 .mode = CIPHER_MODE_NONE,
3997 },
3998 .auth_info = {
3999 .alg = HASH_ALG_SHA384,
4000 .mode = HASH_MODE_HASH,
4001 },
4002 },
4003 {
4004 .type = CRYPTO_ALG_TYPE_AHASH,
4005 .alg.hash = {
4006 .halg.digestsize = SHA384_DIGEST_SIZE,
4007 .halg.base = {
4008 .cra_name = "hmac(sha384)",
4009 .cra_driver_name = "hmac-sha384-iproc",
4010 .cra_blocksize = SHA384_BLOCK_SIZE,
4011 }
4012 },
4013 .cipher_info = {
4014 .alg = CIPHER_ALG_NONE,
4015 .mode = CIPHER_MODE_NONE,
4016 },
4017 .auth_info = {
4018 .alg = HASH_ALG_SHA384,
4019 .mode = HASH_MODE_HMAC,
4020 },
4021 },
4022 {
4023 .type = CRYPTO_ALG_TYPE_AHASH,
4024 .alg.hash = {
4025 .halg.digestsize = SHA512_DIGEST_SIZE,
4026 .halg.base = {
4027 .cra_name = "sha512",
4028 .cra_driver_name = "sha512-iproc",
4029 .cra_blocksize = SHA512_BLOCK_SIZE,
4030 }
4031 },
4032 .cipher_info = {
4033 .alg = CIPHER_ALG_NONE,
4034 .mode = CIPHER_MODE_NONE,
4035 },
4036 .auth_info = {
4037 .alg = HASH_ALG_SHA512,
4038 .mode = HASH_MODE_HASH,
4039 },
4040 },
4041 {
4042 .type = CRYPTO_ALG_TYPE_AHASH,
4043 .alg.hash = {
4044 .halg.digestsize = SHA512_DIGEST_SIZE,
4045 .halg.base = {
4046 .cra_name = "hmac(sha512)",
4047 .cra_driver_name = "hmac-sha512-iproc",
4048 .cra_blocksize = SHA512_BLOCK_SIZE,
4049 }
4050 },
4051 .cipher_info = {
4052 .alg = CIPHER_ALG_NONE,
4053 .mode = CIPHER_MODE_NONE,
4054 },
4055 .auth_info = {
4056 .alg = HASH_ALG_SHA512,
4057 .mode = HASH_MODE_HMAC,
4058 },
4059 },
4060 {
4061 .type = CRYPTO_ALG_TYPE_AHASH,
4062 .alg.hash = {
4063 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4064 .halg.base = {
4065 .cra_name = "sha3-224",
4066 .cra_driver_name = "sha3-224-iproc",
4067 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4068 }
4069 },
4070 .cipher_info = {
4071 .alg = CIPHER_ALG_NONE,
4072 .mode = CIPHER_MODE_NONE,
4073 },
4074 .auth_info = {
4075 .alg = HASH_ALG_SHA3_224,
4076 .mode = HASH_MODE_HASH,
4077 },
4078 },
4079 {
4080 .type = CRYPTO_ALG_TYPE_AHASH,
4081 .alg.hash = {
4082 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4083 .halg.base = {
4084 .cra_name = "hmac(sha3-224)",
4085 .cra_driver_name = "hmac-sha3-224-iproc",
4086 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4087 }
4088 },
4089 .cipher_info = {
4090 .alg = CIPHER_ALG_NONE,
4091 .mode = CIPHER_MODE_NONE,
4092 },
4093 .auth_info = {
4094 .alg = HASH_ALG_SHA3_224,
		 .mode = HASH_MODE_HMAC,
4096 },
4097 },
4098 {
4099 .type = CRYPTO_ALG_TYPE_AHASH,
4100 .alg.hash = {
4101 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4102 .halg.base = {
4103 .cra_name = "sha3-256",
4104 .cra_driver_name = "sha3-256-iproc",
4105 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4106 }
4107 },
4108 .cipher_info = {
4109 .alg = CIPHER_ALG_NONE,
4110 .mode = CIPHER_MODE_NONE,
4111 },
4112 .auth_info = {
4113 .alg = HASH_ALG_SHA3_256,
4114 .mode = HASH_MODE_HASH,
4115 },
4116 },
4117 {
4118 .type = CRYPTO_ALG_TYPE_AHASH,
4119 .alg.hash = {
4120 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4121 .halg.base = {
4122 .cra_name = "hmac(sha3-256)",
4123 .cra_driver_name = "hmac-sha3-256-iproc",
4124 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4125 }
4126 },
4127 .cipher_info = {
4128 .alg = CIPHER_ALG_NONE,
4129 .mode = CIPHER_MODE_NONE,
4130 },
4131 .auth_info = {
4132 .alg = HASH_ALG_SHA3_256,
4133 .mode = HASH_MODE_HMAC,
4134 },
4135 },
4136 {
4137 .type = CRYPTO_ALG_TYPE_AHASH,
4138 .alg.hash = {
4139 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4140 .halg.base = {
4141 .cra_name = "sha3-384",
4142 .cra_driver_name = "sha3-384-iproc",
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
4144 }
4145 },
4146 .cipher_info = {
4147 .alg = CIPHER_ALG_NONE,
4148 .mode = CIPHER_MODE_NONE,
4149 },
4150 .auth_info = {
4151 .alg = HASH_ALG_SHA3_384,
4152 .mode = HASH_MODE_HASH,
4153 },
4154 },
4155 {
4156 .type = CRYPTO_ALG_TYPE_AHASH,
4157 .alg.hash = {
4158 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4159 .halg.base = {
4160 .cra_name = "hmac(sha3-384)",
4161 .cra_driver_name = "hmac-sha3-384-iproc",
4162 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4163 }
4164 },
4165 .cipher_info = {
4166 .alg = CIPHER_ALG_NONE,
4167 .mode = CIPHER_MODE_NONE,
4168 },
4169 .auth_info = {
4170 .alg = HASH_ALG_SHA3_384,
4171 .mode = HASH_MODE_HMAC,
4172 },
4173 },
4174 {
4175 .type = CRYPTO_ALG_TYPE_AHASH,
4176 .alg.hash = {
4177 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4178 .halg.base = {
4179 .cra_name = "sha3-512",
4180 .cra_driver_name = "sha3-512-iproc",
4181 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4182 }
4183 },
4184 .cipher_info = {
4185 .alg = CIPHER_ALG_NONE,
4186 .mode = CIPHER_MODE_NONE,
4187 },
4188 .auth_info = {
4189 .alg = HASH_ALG_SHA3_512,
4190 .mode = HASH_MODE_HASH,
4191 },
4192 },
4193 {
4194 .type = CRYPTO_ALG_TYPE_AHASH,
4195 .alg.hash = {
4196 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4197 .halg.base = {
4198 .cra_name = "hmac(sha3-512)",
4199 .cra_driver_name = "hmac-sha3-512-iproc",
4200 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4201 }
4202 },
4203 .cipher_info = {
4204 .alg = CIPHER_ALG_NONE,
4205 .mode = CIPHER_MODE_NONE,
4206 },
4207 .auth_info = {
4208 .alg = HASH_ALG_SHA3_512,
4209 .mode = HASH_MODE_HMAC,
4210 },
4211 },
4212 {
4213 .type = CRYPTO_ALG_TYPE_AHASH,
4214 .alg.hash = {
4215 .halg.digestsize = AES_BLOCK_SIZE,
4216 .halg.base = {
4217 .cra_name = "xcbc(aes)",
4218 .cra_driver_name = "xcbc-aes-iproc",
4219 .cra_blocksize = AES_BLOCK_SIZE,
4220 }
4221 },
4222 .cipher_info = {
4223 .alg = CIPHER_ALG_NONE,
4224 .mode = CIPHER_MODE_NONE,
4225 },
4226 .auth_info = {
4227 .alg = HASH_ALG_AES,
4228 .mode = HASH_MODE_XCBC,
4229 },
4230 },
4231 {
4232 .type = CRYPTO_ALG_TYPE_AHASH,
4233 .alg.hash = {
4234 .halg.digestsize = AES_BLOCK_SIZE,
4235 .halg.base = {
4236 .cra_name = "cmac(aes)",
4237 .cra_driver_name = "cmac-aes-iproc",
4238 .cra_blocksize = AES_BLOCK_SIZE,
4239 }
4240 },
4241 .cipher_info = {
4242 .alg = CIPHER_ALG_NONE,
4243 .mode = CIPHER_MODE_NONE,
4244 },
4245 .auth_info = {
4246 .alg = HASH_ALG_AES,
4247 .mode = HASH_MODE_CMAC,
4248 },
4249 },
4250};
4251
4252static int generic_cra_init(struct crypto_tfm *tfm,
4253 struct iproc_alg_s *cipher_alg)
4254{
4255 struct spu_hw *spu = &iproc_priv.spu;
4256 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4257 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4258
4259 flow_log("%s()\n", __func__);
4260
4261 ctx->alg = cipher_alg;
4262 ctx->cipher = cipher_alg->cipher_info;
4263 ctx->auth = cipher_alg->auth_info;
4264 ctx->auth_first = cipher_alg->auth_first;
4265 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4266 ctx->cipher.mode,
4267 blocksize);
4268 ctx->fallback_cipher = NULL;
4269
4270 ctx->enckeylen = 0;
4271 ctx->authkeylen = 0;
4272
4273 atomic_inc(&iproc_priv.stream_count);
4274 atomic_inc(&iproc_priv.session_count);
4275
4276 return 0;
4277}
4278
4279static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
4280{
4281 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
4282 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
4283 struct iproc_alg_s *cipher_alg;
4284
4285 flow_log("%s()\n", __func__);
4286
4287 crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
4288
4289 cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
4290 return generic_cra_init(tfm, cipher_alg);
4291}
4292
4293static int ahash_cra_init(struct crypto_tfm *tfm)
4294{
4295 int err;
4296 struct crypto_alg *alg = tfm->__crt_alg;
4297 struct iproc_alg_s *cipher_alg;
4298
4299 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4300 alg.hash);
4301
4302 err = generic_cra_init(tfm, cipher_alg);
4303 flow_log("%s()\n", __func__);
4304
	/*
	 * export state size has to be < 512 bytes. So don't include msg
	 * bufs in the state size.
	 */
4309 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4310 sizeof(struct iproc_reqctx_s));
4311
4312 return err;
4313}
4314
4315static int aead_cra_init(struct crypto_aead *aead)
4316{
4317 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4318 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4319 struct crypto_alg *alg = tfm->__crt_alg;
4320 struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4321 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4322 alg.aead);
4323
4324 int err = generic_cra_init(tfm, cipher_alg);
4325
4326 flow_log("%s()\n", __func__);
4327
4328 crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4329 ctx->is_esp = false;
4330 ctx->salt_len = 0;
4331 ctx->salt_offset = 0;
4332
4333
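	/* random first IV */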
4334 get_random_bytes(ctx->iv, MAX_IV_SIZE);
4335 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
4336
4337 if (!err) {
4338 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4339 flow_log("%s() creating fallback cipher\n", __func__);
4340
4341 ctx->fallback_cipher =
4342 crypto_alloc_aead(alg->cra_name, 0,
4343 CRYPTO_ALG_ASYNC |
4344 CRYPTO_ALG_NEED_FALLBACK);
4345 if (IS_ERR(ctx->fallback_cipher)) {
4346 pr_err("%s() Error: failed to allocate fallback for %s\n",
4347 __func__, alg->cra_name);
4348 return PTR_ERR(ctx->fallback_cipher);
4349 }
4350 }
4351 }
4352
4353 return err;
4354}
4355
4356static void generic_cra_exit(struct crypto_tfm *tfm)
4357{
4358 atomic_dec(&iproc_priv.session_count);
4359}
4360
4361static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
4362{
4363 generic_cra_exit(crypto_skcipher_tfm(tfm));
4364}
4365
4366static void aead_cra_exit(struct crypto_aead *aead)
4367{
4368 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4369 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4370
4371 generic_cra_exit(tfm);
4372
4373 if (ctx->fallback_cipher) {
4374 crypto_free_aead(ctx->fallback_cipher);
4375 ctx->fallback_cipher = NULL;
4376 }
4377}
4378

/**
 * spu_functions_register() - Specify hardware-specific SPU functions based on
 * SPU type read from the device tree.
 * @dev:	 device structure
 * @spu_type:	 SPU hardware generation
 * @spu_subtype: SPU hardware version
 */
4386static void spu_functions_register(struct device *dev,
4387 enum spu_spu_type spu_type,
4388 enum spu_spu_subtype spu_subtype)
4389{
4390 struct spu_hw *spu = &iproc_priv.spu;
4391
4392 if (spu_type == SPU_TYPE_SPUM) {
4393 dev_dbg(dev, "Registering SPUM functions");
4394 spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4395 spu->spu_payload_length = spum_payload_length;
4396 spu->spu_response_hdr_len = spum_response_hdr_len;
4397 spu->spu_hash_pad_len = spum_hash_pad_len;
4398 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4399 spu->spu_assoc_resp_len = spum_assoc_resp_len;
4400 spu->spu_aead_ivlen = spum_aead_ivlen;
4401 spu->spu_hash_type = spum_hash_type;
4402 spu->spu_digest_size = spum_digest_size;
4403 spu->spu_create_request = spum_create_request;
4404 spu->spu_cipher_req_init = spum_cipher_req_init;
4405 spu->spu_cipher_req_finish = spum_cipher_req_finish;
4406 spu->spu_request_pad = spum_request_pad;
4407 spu->spu_tx_status_len = spum_tx_status_len;
4408 spu->spu_rx_status_len = spum_rx_status_len;
4409 spu->spu_status_process = spum_status_process;
4410 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4411 spu->spu_ccm_update_iv = spum_ccm_update_iv;
4412 spu->spu_wordalign_padlen = spum_wordalign_padlen;
4413 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4414 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4415 else
4416 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4417 } else {
4418 dev_dbg(dev, "Registering SPU2 functions");
4419 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4420 spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4421 spu->spu_payload_length = spu2_payload_length;
4422 spu->spu_response_hdr_len = spu2_response_hdr_len;
4423 spu->spu_hash_pad_len = spu2_hash_pad_len;
4424 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4425 spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4426 spu->spu_aead_ivlen = spu2_aead_ivlen;
4427 spu->spu_hash_type = spu2_hash_type;
4428 spu->spu_digest_size = spu2_digest_size;
4429 spu->spu_create_request = spu2_create_request;
4430 spu->spu_cipher_req_init = spu2_cipher_req_init;
4431 spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4432 spu->spu_request_pad = spu2_request_pad;
4433 spu->spu_tx_status_len = spu2_tx_status_len;
4434 spu->spu_rx_status_len = spu2_rx_status_len;
4435 spu->spu_status_process = spu2_status_process;
4436 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4437 spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4438 spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4439 }
4440}
4441
/**
 * spu_mb_init() - Initialize mailbox client and request ownership of a
 * mailbox channel for each SPU.
 * @dev:  SPU driver device structure
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */
4450static int spu_mb_init(struct device *dev)
4451{
4452 struct mbox_client *mcl = &iproc_priv.mcl;
4453 int err, i;
4454
4455 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4456 sizeof(struct mbox_chan *), GFP_KERNEL);
4457 if (!iproc_priv.mbox)
4458 return -ENOMEM;
4459
4460 mcl->dev = dev;
4461 mcl->tx_block = false;
4462 mcl->tx_tout = 0;
4463 mcl->knows_txdone = true;
4464 mcl->rx_callback = spu_rx_callback;
4465 mcl->tx_done = NULL;
4466
4467 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4468 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4469 if (IS_ERR(iproc_priv.mbox[i])) {
4470 err = PTR_ERR(iproc_priv.mbox[i]);
4471 dev_err(dev,
4472 "Mbox channel %d request failed with err %d",
4473 i, err);
4474 iproc_priv.mbox[i] = NULL;
4475 goto free_channels;
4476 }
4477 }
4478
4479 return 0;
4480free_channels:
4481 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4482 if (iproc_priv.mbox[i])
4483 mbox_free_channel(iproc_priv.mbox[i]);
4484 }
4485
4486 return err;
4487}
4488
4489static void spu_mb_release(struct platform_device *pdev)
4490{
4491 int i;
4492
4493 for (i = 0; i < iproc_priv.spu.num_chan; i++)
4494 mbox_free_channel(iproc_priv.mbox[i]);
4495}
4496
4497static void spu_counters_init(void)
4498{
4499 int i;
4500 int j;
4501
4502 atomic_set(&iproc_priv.session_count, 0);
4503 atomic_set(&iproc_priv.stream_count, 0);
4504 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4505 atomic64_set(&iproc_priv.bytes_in, 0);
4506 atomic64_set(&iproc_priv.bytes_out, 0);
4507 for (i = 0; i < SPU_OP_NUM; i++) {
4508 atomic_set(&iproc_priv.op_counts[i], 0);
4509 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4510 }
4511 for (i = 0; i < CIPHER_ALG_LAST; i++)
4512 for (j = 0; j < CIPHER_MODE_LAST; j++)
4513 atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4514
4515 for (i = 0; i < HASH_ALG_LAST; i++) {
4516 atomic_set(&iproc_priv.hash_cnt[i], 0);
4517 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4518 }
4519 for (i = 0; i < AEAD_TYPE_LAST; i++)
4520 atomic_set(&iproc_priv.aead_cnt[i], 0);
4521
4522 atomic_set(&iproc_priv.mb_no_spc, 0);
4523 atomic_set(&iproc_priv.mb_send_fail, 0);
4524 atomic_set(&iproc_priv.bad_icv, 0);
4525}
4526
4527static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
4528{
4529 struct spu_hw *spu = &iproc_priv.spu;
4530 struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
4531 int err;
4532
4533
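	/* SPU2 does not support RC4 */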
4534 if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4535 (spu->spu_type == SPU_TYPE_SPU2))
4536 return 0;
4537
4538 crypto->base.cra_module = THIS_MODULE;
4539 crypto->base.cra_priority = cipher_pri;
4540 crypto->base.cra_alignmask = 0;
4541 crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4542 crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
4543 CRYPTO_ALG_ALLOCATES_MEMORY |
4544 CRYPTO_ALG_KERN_DRIVER_ONLY;
4545
4546 crypto->init = skcipher_init_tfm;
4547 crypto->exit = skcipher_exit_tfm;
4548 crypto->setkey = skcipher_setkey;
4549 crypto->encrypt = skcipher_encrypt;
4550 crypto->decrypt = skcipher_decrypt;
4551
4552 err = crypto_register_skcipher(crypto);
4553
4554 if (err == 0)
4555 driver_alg->registered = true;
4556 pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name);
4557 return err;
4558}
4559
4560static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4561{
4562 struct spu_hw *spu = &iproc_priv.spu;
4563 struct ahash_alg *hash = &driver_alg->alg.hash;
4564 int err;
4565
4566
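	/* AES-XCBC is the only AES hash type currently supported on SPU-M */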
4567 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4568 (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4569 (spu->spu_type == SPU_TYPE_SPUM))
4570 return 0;
4571
4572
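	/* SHA3 ahash algorithms are only supported on SPU2 v2 hardware */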
4573 if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4574 (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4575 return 0;
4576
4577 hash->halg.base.cra_module = THIS_MODULE;
4578 hash->halg.base.cra_priority = hash_pri;
4579 hash->halg.base.cra_alignmask = 0;
4580 hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4581 hash->halg.base.cra_init = ahash_cra_init;
4582 hash->halg.base.cra_exit = generic_cra_exit;
4583 hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4584 CRYPTO_ALG_ALLOCATES_MEMORY;
4585 hash->halg.statesize = sizeof(struct spu_hash_export_s);
4586
4587 if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4588 hash->init = ahash_init;
4589 hash->update = ahash_update;
4590 hash->final = ahash_final;
4591 hash->finup = ahash_finup;
4592 hash->digest = ahash_digest;
4593 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4594 ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4595 (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4596 hash->setkey = ahash_setkey;
4597 }
4598 } else {
4599 hash->setkey = ahash_hmac_setkey;
4600 hash->init = ahash_hmac_init;
4601 hash->update = ahash_hmac_update;
4602 hash->final = ahash_hmac_final;
4603 hash->finup = ahash_hmac_finup;
4604 hash->digest = ahash_hmac_digest;
4605 }
4606 hash->export = ahash_export;
4607 hash->import = ahash_import;
4608
4609 err = crypto_register_ahash(hash);
4610
4611 if (err == 0)
4612 driver_alg->registered = true;
4613 pr_debug(" registered ahash %s\n",
4614 hash->halg.base.cra_driver_name);
4615 return err;
4616}
4617
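/**
 * spu_register_aead() - Fill in the generic fields of an AEAD algorithm
 * entry and register it with the kernel crypto API.
 * @driver_alg:  driver_algs[] entry to register
 *
 * Return: 0 on success, error code from crypto_register_aead() otherwise
 */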
static int spu_register_aead(struct iproc_alg_s *driver_alg)
{
	struct aead_alg *aead = &driver_alg->alg.aead;
	int err;

	aead->base.cra_module = THIS_MODULE;
	aead->base.cra_priority = aead_pri;
	aead->base.cra_alignmask = 0;
	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	/* OR in flags so any flags already set in the alg entry are kept */
	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	/* setkey is not assigned here; each driver_algs entry supplies its own */
	aead->setauthsize = aead_setauthsize;
	aead->encrypt = aead_encrypt;
	aead->decrypt = aead_decrypt;
	aead->init = aead_cra_init;
	aead->exit = aead_cra_exit;

	err = crypto_register_aead(aead);
	if (err == 0) {
		driver_alg->registered = true;
		pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
	}
	return err;
}

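/**
 * spu_algs_register() - Register every algorithm in driver_algs[] with the
 * kernel crypto API, unwinding the successful registrations on failure.
 * @dev:  device used for error reporting
 *
 * Return: 0 if all algorithms registered (or were skipped for this
 *         hardware), error code otherwise
 */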
static int spu_algs_register(struct device *dev)
{
	int i, j;
	int err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			err = spu_register_skcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
			break;
		default:
			dev_err(dev,
				"iproc-crypto: unknown alg type: %d",
				driver_algs[i].type);
			err = -EINVAL;
		}

		if (err) {
			dev_err(dev, "alg registration failed with error %d\n",
				err);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	/* Unregister only those algorithms that registered successfully */
	for (j = 0; j < i; j++) {
		if (!driver_algs[j].registered)
			continue;
		switch (driver_algs[j].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
			driver_algs[j].registered = false;
			break;
		}
	}
	return err;
}

/* ===== Device tree match data ===== */

static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};

static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);

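/**
 * spu_dt_read() - Read hardware configuration from the device tree node.
 * @pdev:  Platform device structure
 *
 * Determines the SPU type/subtype from the matched compatible string,
 * counts the mailbox channels, and maps the control registers of each SPU.
 *
 * Return: 0 if successful, negative error code otherwise
 */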
static int spu_dt_read(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;

	/*
	 * Count the mailbox channels. If the mboxes property is missing or
	 * malformed, of_count_phandle_with_args() returns a negative errno;
	 * fail the probe rather than carrying that value forward.
	 */
	err = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
	if (err <= 0) {
		dev_err(dev, "Failed to count mailbox channels\n");
		return err ? err : -ENODEV;
	}
	spu->num_chan = err;

	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(dev, "Failed to match device\n");
		return -ENODEV;
	}

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;

	/* Map the control registers of each SPU described by the node */
	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(dev, "Failed to map registers: %d\n",
				err);
			spu->reg_vbase[i] = NULL;
			return err;
		}
	}
	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);

	return 0;
}

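/**
 * bcm_spu_probe() - Initialize the SPU hardware: read the device tree,
 * request mailbox channels, register SPU functions and counters, set up
 * debugfs, and register the supported crypto algorithms.
 * @pdev:  Platform device structure
 *
 * Return: 0 if successful, negative error code otherwise
 */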
static int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	int err;

	iproc_priv.pdev = pdev;
	platform_set_drvdata(iproc_priv.pdev, &iproc_priv);

	err = spu_dt_read(pdev);
	if (err < 0)
		goto failure;

	err = spu_mb_init(dev);
	if (err < 0)
		goto failure;

	/* SPU-M requests are preceded by an 8-byte BCM header; SPU2 uses none */
	if (spu->spu_type == SPU_TYPE_SPUM)
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);

	spu_counters_init();

	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
failure:
	/*
	 * Safe even if spu_mb_init() was never reached or failed partway:
	 * mbox_free_channel() is a no-op for channels that were not requested.
	 */
	spu_mb_release(pdev);
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
}

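/**
 * bcm_spu_remove() - Tear down the driver: unregister all previously
 * registered algorithms, remove debugfs entries, and free the mailbox
 * channels.
 * @pdev:  Platform device structure
 *
 * Return: 0 always
 */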
static int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/*
		 * Not all algorithms were registered, depending on whether
		 * the hardware is SPU or SPU2. So skip any algorithms that
		 * were not previously registered.
		 */
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
			dev_dbg(dev, "  unregistered cipher %s\n",
				driver_algs[i].alg.skcipher.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, "  unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, "  unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
	return 0;
}

/* ===== Kernel Module API ===== */

static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		.name = "brcm-spu-crypto",
		.of_match_table = of_match_ptr(bcm_spu_dt_ids),
	},
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");