/*
 * Broadcom SPU crypto engine driver. Submits symmetric cipher (ablkcipher),
 * hash/HMAC (ahash), and AEAD requests to SPU-M/SPU2 hardware through the
 * Broadcom mailbox framework.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha3.h>

#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* Global device-private state shared by all transforms and requests */
struct device_private iproc_priv;

/* Debug logging module parameters */
int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * Algorithm priorities. When multiple drivers register the same algorithm,
 * the crypto API selects the implementation with the highest priority.
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");

/* Fixed BCM header prepended to SPU request messages */
char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };

/* Length of BCM header prepended to SPU messages; set per platform */
#define BCM_HDR_LEN iproc_priv.bcm_hdr_len

/* Min and max time (usec) to sleep before retrying when mailbox is full */
#define MBOX_SLEEP_MIN 800
#define MBOX_SLEEP_MAX 1000

/*
 * select_channel() - Select a mailbox channel for the next SPU request, in
 * simple round-robin order.
 *
 * Return: index of the mailbox channel to use
 */
static u8 select_channel(void)
{
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}

/*
 * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ablkcipher request. Includes buffers
 * to catch the SPU message headers and the response data.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @chunksize:	Number of bytes of response data expected
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist allocated here is freed in spu_chunk_cleanup() when the
 * SPU operation completes.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
143static int
144spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
145 struct iproc_reqctx_s *rctx,
146 u8 rx_frag_num,
147 unsigned int chunksize, u32 stat_pad_len)
148{
149 struct spu_hw *spu = &iproc_priv.spu;
150 struct scatterlist *sg;
151 struct iproc_ctx_s *ctx = rctx->ctx;
152 u32 datalen;
153
154 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
155 rctx->gfp);
156 if (!mssg->spu.dst)
157 return -ENOMEM;
158
159 sg = mssg->spu.dst;
160 sg_init_table(sg, rx_frag_num);
161
162 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
163
164
165 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
166 spu->spu_xts_tweak_in_payload())
167 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
168 SPU_XTS_TWEAK_SIZE);
169
170
171 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
172 rctx->dst_nents, chunksize);
173 if (datalen < chunksize) {
174 pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
175 __func__, chunksize, datalen);
176 return -EFAULT;
177 }
178
179 if (ctx->cipher.alg == CIPHER_ALG_RC4)
180
181 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
182
183 if (stat_pad_len)
184 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
185
186 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
187 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
188
189 return 0;
190}

/*
 * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
 * send a SPU request message for an ablkcipher request. Includes SPU message
 * headers and the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @chunksize:	Number of bytes of request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist allocated here is freed in spu_chunk_cleanup() when the
 * SPU operation completes.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
211static int
212spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
213 struct iproc_reqctx_s *rctx,
214 u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
215{
216 struct spu_hw *spu = &iproc_priv.spu;
217 struct scatterlist *sg;
218 struct iproc_ctx_s *ctx = rctx->ctx;
219 u32 datalen;
220 u32 stat_len;
221
222 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
223 rctx->gfp);
224 if (unlikely(!mssg->spu.src))
225 return -ENOMEM;
226
227 sg = mssg->spu.src;
228 sg_init_table(sg, tx_frag_num);
229
230 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
231 BCM_HDR_LEN + ctx->spu_req_hdr_len);
232
233
234 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
235 spu->spu_xts_tweak_in_payload())
236 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
237
238
239 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
240 rctx->src_nents, chunksize);
241 if (unlikely(datalen < chunksize)) {
242 pr_err("%s(): failed to copy src sg to mbox msg",
243 __func__);
244 return -EFAULT;
245 }
246
247 if (pad_len)
248 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
249
250 stat_len = spu->spu_tx_status_len();
251 if (stat_len) {
252 memset(rctx->msg_buf.tx_stat, 0, stat_len);
253 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
254 }
255 return 0;
256}
257
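/*
 * mailbox_send_message() - Send a mailbox message carrying a SPU request to
 * the selected channel. If the mailbox queue is full and the request allows
 * sleeping, wait briefly and retry up to SPU_MB_RETRY_MAX times, updating the
 * relevant statistics counters.
 *
 * Return: 0 on success, negative error code otherwise
 */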
258static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
259 u8 chan_idx)
260{
261 int err;
262 int retry_cnt = 0;
263 struct device *dev = &(iproc_priv.pdev->dev);
264
265 err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
266 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
267 while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, we
			 * are not in atomic context, so wait a bit and retry.
			 */
272 retry_cnt++;
273 usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
274 err = mbox_send_message(iproc_priv.mbox[chan_idx],
275 mssg);
276 atomic_inc(&iproc_priv.mb_no_spc);
277 }
278 }
279 if (err < 0) {
280 atomic_inc(&iproc_priv.mb_send_fail);
281 return err;
282 }
283
284
285 err = mssg->error;
286 if (unlikely(err < 0)) {
287 dev_err(dev, "message error %d", err);
288
289 }
290
291
292 mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
293 return err;
294}

/*
 * handle_ablkcipher_req() - Submit as much of a block cipher request as fits
 * in a single SPU request message, starting at the current position in the
 * request data.
 * @rctx:	Crypto request context
 *
 * This may be called on the crypto API thread, or, when a request is so large
 * it must be broken into chunks, on the thread that invokes the response
 * callback. When a request is chunked, the next chunk is not submitted until
 * the previous chunk's response has been received.
 *
 * Return: -EINPROGRESS: request has been accepted and result will be returned
 *			 asynchronously
 *         Any other value indicates an error
 */
314static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
315{
316 struct spu_hw *spu = &iproc_priv.spu;
317 struct crypto_async_request *areq = rctx->parent;
318 struct ablkcipher_request *req =
319 container_of(areq, struct ablkcipher_request, base);
320 struct iproc_ctx_s *ctx = rctx->ctx;
321 struct spu_cipher_parms cipher_parms;
322 int err = 0;
323 unsigned int chunksize = 0;
324 int remaining = 0;
325 int chunk_start;
326
327
328 u8 local_iv_ctr[MAX_IV_SIZE];
329 u32 stat_pad_len;
330 u32 pad_len;
331 bool update_key = false;
332 struct brcm_message *mssg;
333
334
335 u8 rx_frag_num = 2;
336 u8 tx_frag_num = 1;
337
338 flow_log("%s\n", __func__);
339
340 cipher_parms.alg = ctx->cipher.alg;
341 cipher_parms.mode = ctx->cipher.mode;
342 cipher_parms.type = ctx->cipher_type;
343 cipher_parms.key_len = ctx->enckeylen;
344 cipher_parms.key_buf = ctx->enckey;
345 cipher_parms.iv_buf = local_iv_ctr;
346 cipher_parms.iv_len = rctx->iv_ctr_len;
347
348 mssg = &rctx->mb_mssg;
349 chunk_start = rctx->src_sent;
350 remaining = rctx->total_todo - chunk_start;
351
352
353 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
354 (remaining > ctx->max_payload))
355 chunksize = ctx->max_payload;
356 else
357 chunksize = remaining;
358
359 rctx->src_sent += chunksize;
360 rctx->total_sent = rctx->src_sent;
361
362
363 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
364 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
365
366 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
367 rctx->is_encrypt && chunk_start)
368
369
370
371
372 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
373 rctx->iv_ctr_len,
374 chunk_start - rctx->iv_ctr_len);
375
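	/*
	 * Use the IV saved in msg_buf.iv_ctr for this chunk, then update
	 * msg_buf.iv_ctr with the IV for the next chunk: for CBC decrypt it
	 * is the last ciphertext block of this chunk (saved from the source
	 * before it may be overwritten in place); for CTR it is the counter
	 * advanced by the number of AES blocks in this chunk.
	 */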
376 if (rctx->iv_ctr_len) {
377
378 __builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
379 rctx->iv_ctr_len);
380
381
382 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
383 !rctx->is_encrypt) {
384
385
386
387
388 sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
389 rctx->iv_ctr_len,
390 rctx->src_sent - rctx->iv_ctr_len);
391 } else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
392
393
394
395
396
397
398
399
400
401 add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
402 }
403 }
404
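	/*
	 * RC4 is stateful. For chunks after the first, use the updated state
	 * (SUPDT) returned by the SPU for the previous chunk as the "key" and
	 * mark the request as a state update rather than a fresh init.
	 */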
405 if (ctx->cipher.alg == CIPHER_ALG_RC4) {
406 rx_frag_num++;
407 if (chunk_start) {
408
409
410
411
412 cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
413 update_key = true;
414 cipher_parms.type = CIPHER_TYPE_UPDT;
415 } else if (!rctx->is_encrypt) {
416
417
418
419
420
421
422 update_key = true;
423 cipher_parms.type = CIPHER_TYPE_INIT;
424 }
425 }
426
427 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
428 flow_log("max_payload infinite\n");
429 else
430 flow_log("max_payload %u\n", ctx->max_payload);
431
432 flow_log("sent:%u start:%u remains:%u size:%u\n",
433 rctx->src_sent, chunk_start, remaining, chunksize);
434
435
436 memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
437 sizeof(rctx->msg_buf.bcm_spu_req_hdr));
438
439
440
441
442
443
444 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
445 ctx->spu_req_hdr_len, !(rctx->is_encrypt),
446 &cipher_parms, update_key, chunksize);
447
448 atomic64_add(chunksize, &iproc_priv.bytes_out);
449
450 stat_pad_len = spu->spu_wordalign_padlen(chunksize);
451 if (stat_pad_len)
452 rx_frag_num++;
453 pad_len = stat_pad_len;
454 if (pad_len) {
455 tx_frag_num++;
456 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
457 0, ctx->auth.alg, ctx->auth.mode,
458 rctx->total_sent, stat_pad_len);
459 }
460
461 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
462 ctx->spu_req_hdr_len);
463 packet_log("payload:\n");
464 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
465 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
466
467
468
469
470
471 memset(mssg, 0, sizeof(*mssg));
472 mssg->type = BRCM_MESSAGE_SPU;
473 mssg->ctx = rctx;
474
475
476 rx_frag_num += rctx->dst_nents;
477
478 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
479 spu->spu_xts_tweak_in_payload())
480 rx_frag_num++;
481
482 err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
483 stat_pad_len);
484 if (err)
485 return err;
486
487
488 tx_frag_num += rctx->src_nents;
489 if (spu->spu_tx_status_len())
490 tx_frag_num++;
491
492 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
493 spu->spu_xts_tweak_in_payload())
494 tx_frag_num++;
495
496 err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
497 pad_len);
498 if (err)
499 return err;
500
501 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
502 if (unlikely(err < 0))
503 return err;
504
505 return -EINPROGRESS;
506}

/*
 * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
 * total received count for the request and updates global stats.
 * @rctx:	Crypto request context
 */
513static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
514{
515 struct spu_hw *spu = &iproc_priv.spu;
516#ifdef DEBUG
517 struct crypto_async_request *areq = rctx->parent;
518 struct ablkcipher_request *req = ablkcipher_request_cast(areq);
519#endif
520 struct iproc_ctx_s *ctx = rctx->ctx;
521 u32 payload_len;
522
523
524 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
525
526
527
528
529
530 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
531 spu->spu_xts_tweak_in_payload() &&
532 (payload_len >= SPU_XTS_TWEAK_SIZE))
533 payload_len -= SPU_XTS_TWEAK_SIZE;
534
535 atomic64_add(payload_len, &iproc_priv.bytes_in);
536
537 flow_log("%s() offset: %u, bd_len: %u BD:\n",
538 __func__, rctx->total_received, payload_len);
539
540 dump_sg(req->dst, rctx->total_received, payload_len);
541 if (ctx->cipher.alg == CIPHER_ALG_RC4)
542 packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
543 SPU_SUPDT_LEN);
544
545 rctx->total_received += payload_len;
546 if (rctx->total_received == rctx->total_todo) {
547 atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
548 atomic_inc(
549 &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
550 }
551}

/*
 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ahash request.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @digestsize:	length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist allocated here is freed in spu_chunk_cleanup() when the
 * SPU operation completes.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
572static int
573spu_ahash_rx_sg_create(struct brcm_message *mssg,
574 struct iproc_reqctx_s *rctx,
575 u8 rx_frag_num, unsigned int digestsize,
576 u32 stat_pad_len)
577{
578 struct spu_hw *spu = &iproc_priv.spu;
579 struct scatterlist *sg;
580 struct iproc_ctx_s *ctx = rctx->ctx;
581
582 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
583 rctx->gfp);
584 if (!mssg->spu.dst)
585 return -ENOMEM;
586
587 sg = mssg->spu.dst;
588 sg_init_table(sg, rx_frag_num);
589
590 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
591
592
593 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
594
595 if (stat_pad_len)
596 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
597
598 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
599 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
600 return 0;
601}

/*
 * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send
 * a SPU request message for an ahash request. Includes SPU message headers
 * and the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length in bytes of the SPU message header
 * @hash_carry_len: Number of bytes of data carried over from the previous
 *		request
 * @new_data_len: Number of bytes of new request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist allocated here is freed in spu_chunk_cleanup() when the
 * SPU operation completes.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
624static int
625spu_ahash_tx_sg_create(struct brcm_message *mssg,
626 struct iproc_reqctx_s *rctx,
627 u8 tx_frag_num,
628 u32 spu_hdr_len,
629 unsigned int hash_carry_len,
630 unsigned int new_data_len, u32 pad_len)
631{
632 struct spu_hw *spu = &iproc_priv.spu;
633 struct scatterlist *sg;
634 u32 datalen;
635 u32 stat_len;
636
637 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
638 rctx->gfp);
639 if (!mssg->spu.src)
640 return -ENOMEM;
641
642 sg = mssg->spu.src;
643 sg_init_table(sg, tx_frag_num);
644
645 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
646 BCM_HDR_LEN + spu_hdr_len);
647
648 if (hash_carry_len)
649 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
650
651 if (new_data_len) {
652
653 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
654 rctx->src_nents, new_data_len);
655 if (datalen < new_data_len) {
656 pr_err("%s(): failed to copy src sg to mbox msg",
657 __func__);
658 return -EFAULT;
659 }
660 }
661
662 if (pad_len)
663 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
664
665 stat_len = spu->spu_tx_status_len();
666 if (stat_len) {
667 memset(rctx->msg_buf.tx_stat, 0, stat_len);
668 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
669 }
670
671 return 0;
672}

/*
 * handle_ahash_req() - Process an asynchronous hash request from the
 * crypto API.
 * @rctx:  Crypto request context
 *
 * Builds a SPU request message embedded in a mailbox message and submits it
 * to the selected SPU channel. For incremental (non-final) hashing, data that
 * does not fill a whole hash block is buffered in hash_carry and prepended to
 * the next chunk, since the SPU can only hash whole blocks in the middle of
 * a message.
 *
 * Return:
 *   -EINPROGRESS: request was submitted to the SPU and the result will be
 *		   returned asynchronously
 *   -EAGAIN:	   data was buffered locally; no SPU message was submitted and
 *		   no response is expected
 *   other negative value: error
 */
700static int handle_ahash_req(struct iproc_reqctx_s *rctx)
701{
702 struct spu_hw *spu = &iproc_priv.spu;
703 struct crypto_async_request *areq = rctx->parent;
704 struct ahash_request *req = ahash_request_cast(areq);
705 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
706 struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
707 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
708 struct iproc_ctx_s *ctx = rctx->ctx;
709
710
711 unsigned int nbytes_to_hash = 0;
712 int err = 0;
713 unsigned int chunksize = 0;
714
715
716
717
718 unsigned int new_data_len;
719
720 unsigned int chunk_start = 0;
721 u32 db_size;
722 int pad_len = 0;
723 u32 data_pad_len = 0;
724 u32 stat_pad_len = 0;
725 struct brcm_message *mssg;
726 struct spu_request_opts req_opts;
727 struct spu_cipher_parms cipher_parms;
728 struct spu_hash_parms hash_parms;
729 struct spu_aead_parms aead_parms;
730 unsigned int local_nbuf;
731 u32 spu_hdr_len;
732 unsigned int digestsize;
733 u16 rem = 0;
734
735
736
737
738
739 u8 rx_frag_num = 3;
740 u8 tx_frag_num = 1;
741
742 flow_log("total_todo %u, total_sent %u\n",
743 rctx->total_todo, rctx->total_sent);
744
745 memset(&req_opts, 0, sizeof(req_opts));
746 memset(&cipher_parms, 0, sizeof(cipher_parms));
747 memset(&hash_parms, 0, sizeof(hash_parms));
748 memset(&aead_parms, 0, sizeof(aead_parms));
749
750 req_opts.bd_suppress = true;
751 hash_parms.alg = ctx->auth.alg;
752 hash_parms.mode = ctx->auth.mode;
753 hash_parms.type = HASH_TYPE_NONE;
754 hash_parms.key_buf = (u8 *)ctx->authkey;
755 hash_parms.key_len = ctx->authkeylen;
756
757
758
759
760
761
762
763
764 cipher_parms.type = ctx->cipher_type;
765
766 mssg = &rctx->mb_mssg;
767 chunk_start = rctx->src_sent;
768
769
770
771
772
773 nbytes_to_hash = rctx->total_todo - rctx->total_sent;
774 chunksize = nbytes_to_hash;
775 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
776 (chunksize > ctx->max_payload))
777 chunksize = ctx->max_payload;
778
779
780
781
782
783
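	/*
	 * For a non-final update, the SPU can only hash a whole number of
	 * blocks. Round the chunk down to a block multiple; if that leaves
	 * nothing to hash, copy the new data into the hash_carry buffer and
	 * return -EAGAIN so no SPU message is sent for this update.
	 */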
784 if (!rctx->is_final) {
785 u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
786 u16 new_len;
787
788 rem = chunksize % blocksize;
789 if (rem) {
790
791 chunksize -= rem;
792 if (chunksize == 0) {
793
794 new_len = rem - rctx->hash_carry_len;
795 sg_copy_part_to_buf(req->src, dest, new_len,
796 rctx->src_sent);
797 rctx->hash_carry_len = rem;
798 flow_log("Exiting with hash carry len: %u\n",
799 rctx->hash_carry_len);
800 packet_dump(" buf: ",
801 rctx->hash_carry,
802 rctx->hash_carry_len);
803 return -EAGAIN;
804 }
805 }
806 }
807
808
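	/* hash_carry bytes from the previous update are prepended to this chunk */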
809 local_nbuf = rctx->hash_carry_len;
810 rctx->hash_carry_len = 0;
811 if (local_nbuf)
812 tx_frag_num++;
813 new_data_len = chunksize - local_nbuf;
814
815
816 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
817 new_data_len);
818
819
820 if (hash_parms.alg == HASH_ALG_AES)
821 hash_parms.type = (enum hash_type)cipher_parms.type;
822 else
823 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
824
825 digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
826 hash_parms.type);
827 hash_parms.digestsize = digestsize;
828
829
830 rctx->total_sent += chunksize;
831
832 rctx->src_sent += new_data_len;
833
834 if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
835 hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
836 hash_parms.mode,
837 chunksize,
838 blocksize);
839
840
841
842
843
844 if ((hash_parms.type == HASH_TYPE_UPDT) &&
845 (hash_parms.alg != HASH_ALG_AES)) {
846 hash_parms.key_buf = rctx->incr_hash;
847 hash_parms.key_len = digestsize;
848 }
849
850 atomic64_add(chunksize, &iproc_priv.bytes_out);
851
852 flow_log("%s() final: %u nbuf: %u ",
853 __func__, rctx->is_final, local_nbuf);
854
855 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
856 flow_log("max_payload infinite\n");
857 else
858 flow_log("max_payload %u\n", ctx->max_payload);
859
860 flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
861
862
863 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
864
865 hash_parms.prebuf_len = local_nbuf;
866 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
867 BCM_HDR_LEN,
868 &req_opts, &cipher_parms,
869 &hash_parms, &aead_parms,
870 new_data_len);
871
872 if (spu_hdr_len == 0) {
873 pr_err("Failed to create SPU request header\n");
874 return -EFAULT;
875 }
876
877
878
879
880
881 data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
882 db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
883 0, 0, hash_parms.pad_len);
884 if (spu->spu_tx_status_len())
885 stat_pad_len = spu->spu_wordalign_padlen(db_size);
886 if (stat_pad_len)
887 rx_frag_num++;
888 pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
889 if (pad_len) {
890 tx_frag_num++;
891 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
892 hash_parms.pad_len, ctx->auth.alg,
893 ctx->auth.mode, rctx->total_sent,
894 stat_pad_len);
895 }
896
897 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
898 spu_hdr_len);
899 packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
900 flow_log("Data:\n");
901 dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
902 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
903
904
905
906
907
908 memset(mssg, 0, sizeof(*mssg));
909 mssg->type = BRCM_MESSAGE_SPU;
910 mssg->ctx = rctx;
911
912
913 err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
914 stat_pad_len);
915 if (err)
916 return err;
917
918
919 tx_frag_num += rctx->src_nents;
920 if (spu->spu_tx_status_len())
921 tx_frag_num++;
922 err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
923 local_nbuf, new_data_len, pad_len);
924 if (err)
925 return err;
926
927 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
928 if (unlikely(err < 0))
929 return err;
930
931 return -EINPROGRESS;
932}

/*
 * spu_hmac_outer_hash() - Compute the outer hash of an HMAC in software, by
 * hashing the precomputed opad block (one hash block in size) followed by
 * the inner digest that the SPU already placed in req->result.
 * @req:  hash request from the crypto API
 * @ctx:  transform context, containing the precomputed opad
 *
 * Return: 0 if successful, < 0 otherwise
 */
944static int spu_hmac_outer_hash(struct ahash_request *req,
945 struct iproc_ctx_s *ctx)
946{
947 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
948 unsigned int blocksize =
949 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
950 int rc;
951
952 switch (ctx->auth.alg) {
953 case HASH_ALG_MD5:
954 rc = do_shash("md5", req->result, ctx->opad, blocksize,
955 req->result, ctx->digestsize, NULL, 0);
956 break;
957 case HASH_ALG_SHA1:
958 rc = do_shash("sha1", req->result, ctx->opad, blocksize,
959 req->result, ctx->digestsize, NULL, 0);
960 break;
961 case HASH_ALG_SHA224:
962 rc = do_shash("sha224", req->result, ctx->opad, blocksize,
963 req->result, ctx->digestsize, NULL, 0);
964 break;
965 case HASH_ALG_SHA256:
966 rc = do_shash("sha256", req->result, ctx->opad, blocksize,
967 req->result, ctx->digestsize, NULL, 0);
968 break;
969 case HASH_ALG_SHA384:
970 rc = do_shash("sha384", req->result, ctx->opad, blocksize,
971 req->result, ctx->digestsize, NULL, 0);
972 break;
973 case HASH_ALG_SHA512:
974 rc = do_shash("sha512", req->result, ctx->opad, blocksize,
975 req->result, ctx->digestsize, NULL, 0);
976 break;
977 default:
978 pr_err("%s() Error : unknown hmac type\n", __func__);
979 rc = -EINVAL;
980 }
981 return rc;
982}

/*
 * ahash_req_done() - Process the final hash response. Copies the digest to
 * req->result, byte-swaps MD5 output on SPU-M, finishes a software HMAC if
 * required, and updates statistics.
 * @rctx: Crypto request context
 *
 * Return: 0 if successful, < 0 otherwise
 */
991static int ahash_req_done(struct iproc_reqctx_s *rctx)
992{
993 struct spu_hw *spu = &iproc_priv.spu;
994 struct crypto_async_request *areq = rctx->parent;
995 struct ahash_request *req = ahash_request_cast(areq);
996 struct iproc_ctx_s *ctx = rctx->ctx;
997 int err;
998
999 memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
1000
1001 if (spu->spu_type == SPU_TYPE_SPUM) {
1002
1003
1004
1005 if (ctx->auth.alg == HASH_ALG_MD5) {
1006 __swab32s((u32 *)req->result);
1007 __swab32s(((u32 *)req->result) + 1);
1008 __swab32s(((u32 *)req->result) + 2);
1009 __swab32s(((u32 *)req->result) + 3);
1010 __swab32s(((u32 *)req->result) + 4);
1011 }
1012 }
1013
1014 flow_dump(" digest ", req->result, ctx->digestsize);
1015
1016
1017 if (rctx->is_sw_hmac) {
1018 err = spu_hmac_outer_hash(req, ctx);
1019 if (err < 0)
1020 return err;
1021 flow_dump(" hmac: ", req->result, ctx->digestsize);
1022 }
1023
1024 if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
1025 atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
1026 atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
1027 } else {
1028 atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
1029 atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
1030 }
1031
1032 return 0;
1033}

/*
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Saves the returned digest as the incremental hash for the next chunk and,
 * if the entire request has been processed, finishes the request.
 * @rctx: Crypto request context
 */
1041static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
1042{
1043 struct iproc_ctx_s *ctx = rctx->ctx;
1044#ifdef DEBUG
1045 struct crypto_async_request *areq = rctx->parent;
1046 struct ahash_request *req = ahash_request_cast(areq);
1047 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1048 unsigned int blocksize =
1049 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1050#endif
1051
1052
1053
1054
1055 memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1056
1057 flow_log("%s() blocksize:%u digestsize:%u\n",
1058 __func__, blocksize, ctx->digestsize);
1059
1060 atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1061
1062 if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1063 ahash_req_done(rctx);
1064}

/*
 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an AEAD request. Includes buffers to
 * catch SPU message headers, the response AAD and data, and the digest.
 * @mssg:	mailbox message containing the receive sg
 * @req:	crypto API AEAD request
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @assoc_len:	Length of associated data in the crypto request
 * @ret_iv_len:	Length of IV returned in the response
 * @resp_len:	Number of bytes of response data expected
 * @digestsize:	Length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist allocated here is freed in spu_chunk_cleanup() when the
 * SPU operation completes.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
1090static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1091 struct aead_request *req,
1092 struct iproc_reqctx_s *rctx,
1093 u8 rx_frag_num,
1094 unsigned int assoc_len,
1095 u32 ret_iv_len, unsigned int resp_len,
1096 unsigned int digestsize, u32 stat_pad_len)
1097{
1098 struct spu_hw *spu = &iproc_priv.spu;
1099 struct scatterlist *sg;
1100 struct iproc_ctx_s *ctx = rctx->ctx;
1101 u32 datalen;
1102 u32 assoc_buf_len;
1103 u8 data_padlen = 0;
1104
1105 if (ctx->is_rfc4543) {
1106
1107 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1108 assoc_len + resp_len);
1109 assoc_buf_len = assoc_len;
1110 } else {
1111 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1112 resp_len);
1113 assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1114 assoc_len, ret_iv_len,
1115 rctx->is_encrypt);
1116 }
1117
1118 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1119
1120 data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1121 resp_len +
1122 data_padlen);
1123
1124 if (data_padlen)
1125
1126 rx_frag_num++;
1127
1128 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1129 rctx->gfp);
1130 if (!mssg->spu.dst)
1131 return -ENOMEM;
1132
1133 sg = mssg->spu.dst;
1134 sg_init_table(sg, rx_frag_num);
1135
1136
1137 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1138
1139 if (assoc_buf_len) {
1140
1141
1142
1143
1144 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1145 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1146 }
1147
1148 if (resp_len) {
1149
1150
1151
1152
1153 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1154 rctx->dst_nents, resp_len);
1155 if (datalen < (resp_len)) {
1156 pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1157 __func__, resp_len, datalen);
1158 return -EFAULT;
1159 }
1160 }
1161
1162
1163 if (data_padlen) {
1164 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1165 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1166 }
1167
1168
1169 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1170
1171 flow_log("stat_pad_len %u\n", stat_pad_len);
1172 if (stat_pad_len) {
1173 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1174 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1175 }
1176
1177 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1178 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1179
1180 return 0;
1181}

/*
 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send
 * a SPU request message for an AEAD request. Includes SPU message headers,
 * the associated data, the IV, the request data, and optionally the digest.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length of SPU message header in bytes
 * @assoc:	crypto API associated data scatterlist
 * @assoc_len:	length of associated data
 * @assoc_nents: number of scatterlist entries containing assoc data
 * @aead_iv_len: length of AEAD IV, if included
 * @chunksize:	Number of bytes of request data
 * @aad_pad_len: Number of bytes of padding at end of AAD
 * @pad_len:	Number of pad bytes
 * @incl_icv:	If true, the ICV is sent in a separate buffer after the data
 *		and any padding
 *
 * The scatterlist allocated here is freed in spu_chunk_cleanup() when the
 * SPU operation completes.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
1210static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1211 struct iproc_reqctx_s *rctx,
1212 u8 tx_frag_num,
1213 u32 spu_hdr_len,
1214 struct scatterlist *assoc,
1215 unsigned int assoc_len,
1216 int assoc_nents,
1217 unsigned int aead_iv_len,
1218 unsigned int chunksize,
1219 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1220{
1221 struct spu_hw *spu = &iproc_priv.spu;
1222 struct scatterlist *sg;
1223 struct scatterlist *assoc_sg = assoc;
1224 struct iproc_ctx_s *ctx = rctx->ctx;
1225 u32 datalen;
1226 u32 written;
1227 u32 assoc_offset = 0;
1228 u32 stat_len;
1229
1230 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1231 rctx->gfp);
1232 if (!mssg->spu.src)
1233 return -ENOMEM;
1234
1235 sg = mssg->spu.src;
1236 sg_init_table(sg, tx_frag_num);
1237
1238 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1239 BCM_HDR_LEN + spu_hdr_len);
1240
1241 if (assoc_len) {
1242
1243 written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1244 assoc_nents, assoc_len);
1245 if (written < assoc_len) {
1246 pr_err("%s(): failed to copy assoc sg to mbox msg",
1247 __func__);
1248 return -EFAULT;
1249 }
1250 }
1251
1252 if (aead_iv_len)
1253 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1254
1255 if (aad_pad_len) {
1256 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1257 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1258 }
1259
1260 datalen = chunksize;
1261 if ((chunksize > ctx->digestsize) && incl_icv)
1262 datalen -= ctx->digestsize;
1263 if (datalen) {
1264
1265 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1266 rctx->src_nents, datalen);
1267 if (written < datalen) {
1268 pr_err("%s(): failed to copy src sg to mbox msg",
1269 __func__);
1270 return -EFAULT;
1271 }
1272 }
1273
1274 if (pad_len) {
1275 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1276 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1277 }
1278
1279 if (incl_icv)
1280 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1281
1282 stat_len = spu->spu_tx_status_len();
1283 if (stat_len) {
1284 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1285 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1286 }
1287 return 0;
1288}

/*
 * handle_aead_req() - Submit an AEAD request to the SPU. Unlike cipher and
 * hash requests, an AEAD request is not broken into chunks; the whole request
 * is sent in a single SPU message.
 * @rctx:  Crypto request context
 *
 * Return: -EINPROGRESS: request was submitted and the result will be returned
 *			 asynchronously
 *         Any other value indicates an error
 */
1307static int handle_aead_req(struct iproc_reqctx_s *rctx)
1308{
1309 struct spu_hw *spu = &iproc_priv.spu;
1310 struct crypto_async_request *areq = rctx->parent;
1311 struct aead_request *req = container_of(areq,
1312 struct aead_request, base);
1313 struct iproc_ctx_s *ctx = rctx->ctx;
1314 int err;
1315 unsigned int chunksize;
1316 unsigned int resp_len;
1317 u32 spu_hdr_len;
1318 u32 db_size;
1319 u32 stat_pad_len;
1320 u32 pad_len;
1321 struct brcm_message *mssg;
1322 struct spu_request_opts req_opts;
1323 struct spu_cipher_parms cipher_parms;
1324 struct spu_hash_parms hash_parms;
1325 struct spu_aead_parms aead_parms;
1326 int assoc_nents = 0;
1327 bool incl_icv = false;
1328 unsigned int digestsize = ctx->digestsize;
1329
1330
1331
1332 u8 rx_frag_num = 2;
1333 u8 tx_frag_num = 1;
1334
1335
1336 chunksize = rctx->total_todo;
1337
1338 flow_log("%s: chunksize %u\n", __func__, chunksize);
1339
1340 memset(&req_opts, 0, sizeof(req_opts));
1341 memset(&hash_parms, 0, sizeof(hash_parms));
1342 memset(&aead_parms, 0, sizeof(aead_parms));
1343
1344 req_opts.is_inbound = !(rctx->is_encrypt);
1345 req_opts.auth_first = ctx->auth_first;
1346 req_opts.is_aead = true;
1347 req_opts.is_esp = ctx->is_esp;
1348
1349 cipher_parms.alg = ctx->cipher.alg;
1350 cipher_parms.mode = ctx->cipher.mode;
1351 cipher_parms.type = ctx->cipher_type;
1352 cipher_parms.key_buf = ctx->enckey;
1353 cipher_parms.key_len = ctx->enckeylen;
1354 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1355 cipher_parms.iv_len = rctx->iv_ctr_len;
1356
1357 hash_parms.alg = ctx->auth.alg;
1358 hash_parms.mode = ctx->auth.mode;
1359 hash_parms.type = HASH_TYPE_NONE;
1360 hash_parms.key_buf = (u8 *)ctx->authkey;
1361 hash_parms.key_len = ctx->authkeylen;
1362 hash_parms.digestsize = digestsize;
1363
1364 if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1365 (ctx->authkeylen < SHA224_DIGEST_SIZE))
1366 hash_parms.key_len = SHA224_DIGEST_SIZE;
1367
1368 aead_parms.assoc_size = req->assoclen;
1369 if (ctx->is_esp && !ctx->is_rfc4543) {
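		/*
		 * For ESP (RFC4106), the crypto API counts the IV as part of
		 * the associated data, but the SPU handles the IV separately,
		 * so remove it from the assoc length. On encrypt, also ask for
		 * the IV to be returned after the salt.
		 */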
1370
1371
1372
1373
1374
1375 aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1376
1377 if (rctx->is_encrypt) {
1378 aead_parms.return_iv = true;
1379 aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1380 aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1381 }
1382 } else {
1383 aead_parms.ret_iv_len = 0;
1384 }
1385
1386
1387
1388
1389
1390
1391
1392 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1393 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1394 if (aead_parms.assoc_size)
1395 assoc_nents = spu_sg_count(rctx->assoc, 0,
1396 aead_parms.assoc_size);
1397
1398 mssg = &rctx->mb_mssg;
1399
1400 rctx->total_sent = chunksize;
1401 rctx->src_sent = chunksize;
1402 if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1403 aead_parms.assoc_size,
1404 aead_parms.ret_iv_len,
1405 rctx->is_encrypt))
1406 rx_frag_num++;
1407
1408 aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1409 rctx->iv_ctr_len);
1410
1411 if (ctx->auth.alg == HASH_ALG_AES)
1412 hash_parms.type = (enum hash_type)ctx->cipher_type;
1413
1414
1415 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1416 aead_parms.assoc_size);
1417
1418
1419 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1420 chunksize);
1421
1422 if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1423
1424
1425
1426
1427 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1428 ctx->cipher.mode,
1429 aead_parms.assoc_size + 2);
1430
1431
1432
1433
1434
1435 if (!rctx->is_encrypt)
1436 aead_parms.data_pad_len =
1437 spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1438 chunksize - digestsize);
1439
1440
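		/* Rewrite the CCM IV/counter block in the form the SPU expects */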
1441 spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1442 chunksize, rctx->is_encrypt,
1443 ctx->is_esp);
1444 }
1445
1446 if (ctx->is_rfc4543) {
1447
1448
1449
1450
1451 aead_parms.aad_pad_len = 0;
1452 if (!rctx->is_encrypt)
1453 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1454 ctx->cipher.mode,
1455 aead_parms.assoc_size + chunksize -
1456 digestsize);
1457 else
1458 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1459 ctx->cipher.mode,
1460 aead_parms.assoc_size + chunksize);
1461
1462 req_opts.is_rfc4543 = true;
1463 }
1464
1465 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1466 incl_icv = true;
1467 tx_frag_num++;
1468
1469 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1470 req->assoclen + rctx->total_sent -
1471 digestsize);
1472 }
1473
1474 atomic64_add(chunksize, &iproc_priv.bytes_out);
1475
1476 flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1477
1478
1479 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1480
1481 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1482 BCM_HDR_LEN, &req_opts,
1483 &cipher_parms, &hash_parms,
1484 &aead_parms, chunksize);
1485
1486
1487 db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1488 chunksize, aead_parms.aad_pad_len,
1489 aead_parms.data_pad_len, 0);
1490
1491 stat_pad_len = spu->spu_wordalign_padlen(db_size);
1492
1493 if (stat_pad_len)
1494 rx_frag_num++;
1495 pad_len = aead_parms.data_pad_len + stat_pad_len;
1496 if (pad_len) {
1497 tx_frag_num++;
1498 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1499 aead_parms.data_pad_len, 0,
1500 ctx->auth.alg, ctx->auth.mode,
1501 rctx->total_sent, stat_pad_len);
1502 }
1503
1504 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1505 spu_hdr_len);
1506 dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1507 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1508 packet_log("BD:\n");
1509 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1510 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1511
1512
1513
1514
1515
1516 memset(mssg, 0, sizeof(*mssg));
1517 mssg->type = BRCM_MESSAGE_SPU;
1518 mssg->ctx = rctx;
1519
1520
1521 rx_frag_num += rctx->dst_nents;
1522 resp_len = chunksize;

	/* The SPU always returns the digest/ICV in its own receive buffer */
1529 rx_frag_num++;
1530
1531 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1532 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1533
1534
1535
1536
1537 resp_len -= ctx->digestsize;
1538 if (resp_len == 0)
1539
1540 rx_frag_num -= rctx->dst_nents;
1541 }
1542
1543 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1544 aead_parms.assoc_size,
1545 aead_parms.ret_iv_len, resp_len, digestsize,
1546 stat_pad_len);
1547 if (err)
1548 return err;
1549
1550
1551 tx_frag_num += rctx->src_nents;
1552 tx_frag_num += assoc_nents;
1553 if (aead_parms.aad_pad_len)
1554 tx_frag_num++;
1555 if (aead_parms.iv_len)
1556 tx_frag_num++;
1557 if (spu->spu_tx_status_len())
1558 tx_frag_num++;
1559 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1560 rctx->assoc, aead_parms.assoc_size,
1561 assoc_nents, aead_parms.iv_len, chunksize,
1562 aead_parms.aad_pad_len, pad_len, incl_icv);
1563 if (err)
1564 return err;
1565
1566 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1567 if (unlikely(err < 0))
1568 return err;
1569
1570 return -EINPROGRESS;
1571}

/*
 * handle_aead_resp() - Process a SPU response message for an AEAD request.
 * Copies the returned ICV to the destination sg on encryption and updates
 * statistics.
 * @rctx:  Crypto request context
 */
1577static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1578{
1579 struct spu_hw *spu = &iproc_priv.spu;
1580 struct crypto_async_request *areq = rctx->parent;
1581 struct aead_request *req = container_of(areq,
1582 struct aead_request, base);
1583 struct iproc_ctx_s *ctx = rctx->ctx;
1584 u32 payload_len;
1585 unsigned int icv_offset;
1586 u32 result_len;
1587
1588
1589 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1590 flow_log("payload_len %u\n", payload_len);
1591
1592
1593 atomic64_add(payload_len, &iproc_priv.bytes_in);
1594
1595 if (req->assoclen)
1596 packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1597 req->assoclen);
1598
1599
1600
1601
1602
1603
1604 result_len = req->cryptlen;
1605 if (rctx->is_encrypt) {
1606 icv_offset = req->assoclen + rctx->total_sent;
1607 packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1608 flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1609 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1610 ctx->digestsize, icv_offset);
1611 result_len += ctx->digestsize;
1612 }
1613
1614 packet_log("response data: ");
1615 dump_sg(req->dst, req->assoclen, result_len);
1616
1617 atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1618 if (ctx->cipher.alg == CIPHER_ALG_AES) {
1619 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1620 atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1621 else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1622 atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1623 else
1624 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1625 } else {
1626 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1627 }
1628}

/*
 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request:
 * free the tx and rx scatterlists allocated for the mailbox message.
 * @rctx: Crypto request context
 */
1637static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1638{
1639
1640 struct brcm_message *mssg = &rctx->mb_mssg;
1641
1642 kfree(mssg->spu.src);
1643 kfree(mssg->spu.dst);
1644 memset(mssg, 0, sizeof(struct brcm_message));
1645}

/*
 * finish_req() - Used to invoke the completion callback from the requester
 * when a request has been handled asynchronously.
 * @rctx:  Request context
 * @err:   Indicates whether the request was successful or not
 *
 * Ensures that cleanup has been done for the request before completing it.
 */
1655static void finish_req(struct iproc_reqctx_s *rctx, int err)
1656{
1657 struct crypto_async_request *areq = rctx->parent;
1658
1659 flow_log("%s() err:%d\n\n", __func__, err);
1660
1661
1662 spu_chunk_cleanup(rctx);
1663
1664 if (areq)
1665 areq->complete(areq, err);
1666}

/*
 * spu_rx_callback() - Callback from the mailbox framework with a SPU response.
 * @cl:		mailbox client structure for the SPU driver
 * @msg:	mailbox message containing the SPU response
 *
 * Checks the response status, processes the response for the request type,
 * and then either submits the next chunk of the request or completes it.
 */
1673static void spu_rx_callback(struct mbox_client *cl, void *msg)
1674{
1675 struct spu_hw *spu = &iproc_priv.spu;
1676 struct brcm_message *mssg = msg;
1677 struct iproc_reqctx_s *rctx;
1678 struct iproc_ctx_s *ctx;
1679 struct crypto_async_request *areq;
1680 int err = 0;
1681
1682 rctx = mssg->ctx;
1683 if (unlikely(!rctx)) {
1684
1685 pr_err("%s(): no request context", __func__);
1686 err = -EFAULT;
1687 goto cb_finish;
1688 }
1689 areq = rctx->parent;
1690 ctx = rctx->ctx;
1691
1692
1693 err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1694 if (err != 0) {
1695 if (err == SPU_INVALID_ICV)
1696 atomic_inc(&iproc_priv.bad_icv);
1697 err = -EBADMSG;
1698 goto cb_finish;
1699 }
1700
1701
1702 switch (rctx->ctx->alg->type) {
1703 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1704 handle_ablkcipher_resp(rctx);
1705 break;
1706 case CRYPTO_ALG_TYPE_AHASH:
1707 handle_ahash_resp(rctx);
1708 break;
1709 case CRYPTO_ALG_TYPE_AEAD:
1710 handle_aead_resp(rctx);
1711 break;
1712 default:
1713 err = -EINVAL;
1714 goto cb_finish;
1715 }
1716
1717
1718
1719
1720
1721 if (rctx->total_sent < rctx->total_todo) {
1722
1723 spu_chunk_cleanup(rctx);
1724
1725 switch (rctx->ctx->alg->type) {
1726 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1727 err = handle_ablkcipher_req(rctx);
1728 break;
1729 case CRYPTO_ALG_TYPE_AHASH:
1730 err = handle_ahash_req(rctx);
1731 if (err == -EAGAIN)
1732
1733
1734
1735
1736 err = 0;
1737 break;
1738 case CRYPTO_ALG_TYPE_AEAD:
1739 err = handle_aead_req(rctx);
1740 break;
1741 default:
1742 err = -EINVAL;
1743 }
1744
1745 if (err == -EINPROGRESS)
1746
1747 return;
1748 }
1749
1750cb_finish:
1751 finish_req(rctx, err);
1752}

/*
 * ablkcipher_enqueue() - Handle an ablkcipher encrypt or decrypt request.
 * @req:	Crypto API request
 * @encrypt:	true if encrypting; false if decrypting
 *
 * Initializes the request context and submits the first chunk to the SPU.
 *
 * Return: -EINPROGRESS if the request was accepted and the result will be
 *			returned asynchronously
 *	   < 0 if an error
 */
1765static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
1766{
1767 struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
1768 struct iproc_ctx_s *ctx =
1769 crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1770 int err;
1771
1772 flow_log("%s() enc:%u\n", __func__, encrypt);
1773
1774 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1775 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1776 rctx->parent = &req->base;
1777 rctx->is_encrypt = encrypt;
1778 rctx->bd_suppress = false;
1779 rctx->total_todo = req->nbytes;
1780 rctx->src_sent = 0;
1781 rctx->total_sent = 0;
1782 rctx->total_received = 0;
1783 rctx->ctx = ctx;
1784
1785
1786 rctx->src_sg = req->src;
1787 rctx->src_nents = 0;
1788 rctx->src_skip = 0;
1789 rctx->dst_sg = req->dst;
1790 rctx->dst_nents = 0;
1791 rctx->dst_skip = 0;
1792
1793 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1794 ctx->cipher.mode == CIPHER_MODE_CTR ||
1795 ctx->cipher.mode == CIPHER_MODE_OFB ||
1796 ctx->cipher.mode == CIPHER_MODE_XTS ||
1797 ctx->cipher.mode == CIPHER_MODE_GCM ||
1798 ctx->cipher.mode == CIPHER_MODE_CCM) {
1799 rctx->iv_ctr_len =
1800 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
1801 memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
1802 } else {
1803 rctx->iv_ctr_len = 0;
1804 }
1805
1806
1807 rctx->chan_idx = select_channel();
1808 err = handle_ablkcipher_req(rctx);
1809 if (err != -EINPROGRESS)
1810
1811 spu_chunk_cleanup(rctx);
1812
1813 return err;
1814}
1815
1816static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1817 unsigned int keylen)
1818{
1819 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1820 u32 tmp[DES_EXPKEY_WORDS];
1821
1822 if (keylen == DES_KEY_SIZE) {
1823 if (des_ekey(tmp, key) == 0) {
1824 if (crypto_ablkcipher_get_flags(cipher) &
1825 CRYPTO_TFM_REQ_WEAK_KEY) {
1826 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1827
1828 crypto_ablkcipher_set_flags(cipher, flags);
1829 return -EINVAL;
1830 }
1831 }
1832
1833 ctx->cipher_type = CIPHER_TYPE_DES;
1834 } else {
1835 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1836 return -EINVAL;
1837 }
1838 return 0;
1839}
1840
1841static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1842 unsigned int keylen)
1843{
1844 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1845
1846 if (keylen == (DES_KEY_SIZE * 3)) {
1847 const u32 *K = (const u32 *)key;
1848 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
1849
1850 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1851 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
1852 crypto_ablkcipher_set_flags(cipher, flags);
1853 return -EINVAL;
1854 }
1855
1856 ctx->cipher_type = CIPHER_TYPE_3DES;
1857 } else {
1858 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1859 return -EINVAL;
1860 }
1861 return 0;
1862}
1863
1864static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1865 unsigned int keylen)
1866{
1867 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1868
1869 if (ctx->cipher.mode == CIPHER_MODE_XTS)
1870
1871 keylen = keylen / 2;
1872
1873 switch (keylen) {
1874 case AES_KEYSIZE_128:
1875 ctx->cipher_type = CIPHER_TYPE_AES128;
1876 break;
1877 case AES_KEYSIZE_192:
1878 ctx->cipher_type = CIPHER_TYPE_AES192;
1879 break;
1880 case AES_KEYSIZE_256:
1881 ctx->cipher_type = CIPHER_TYPE_AES256;
1882 break;
1883 default:
1884 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1885 return -EINVAL;
1886 }
1887 WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1888 ((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1889 return 0;
1890}
1891
1892static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1893 unsigned int keylen)
1894{
1895 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1896 int i;
1897
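	/*
	 * Build the RC4 key material block the SPU expects: four zero bytes
	 * followed by the key repeated to fill ARC4_MAX_KEY_SIZE bytes, and
	 * mark the cipher type as INIT for the first chunk.
	 */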
1898 ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
1899
1900 ctx->enckey[0] = 0x00;
1901 ctx->enckey[1] = 0x00;
1902 ctx->enckey[2] = 0x00;
1903 ctx->enckey[3] = 0x00;
1904 for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
1905 ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
1906
1907 ctx->cipher_type = CIPHER_TYPE_INIT;
1908
1909 return 0;
1910}
1911
1912static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1913 unsigned int keylen)
1914{
1915 struct spu_hw *spu = &iproc_priv.spu;
1916 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1917 struct spu_cipher_parms cipher_parms;
1918 u32 alloc_len = 0;
1919 int err;
1920
1921 flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
1922 flow_dump(" key: ", key, keylen);
1923
1924 switch (ctx->cipher.alg) {
1925 case CIPHER_ALG_DES:
1926 err = des_setkey(cipher, key, keylen);
1927 break;
1928 case CIPHER_ALG_3DES:
1929 err = threedes_setkey(cipher, key, keylen);
1930 break;
1931 case CIPHER_ALG_AES:
1932 err = aes_setkey(cipher, key, keylen);
1933 break;
1934 case CIPHER_ALG_RC4:
1935 err = rc4_setkey(cipher, key, keylen);
1936 break;
1937 default:
1938 pr_err("%s() Error: unknown cipher alg\n", __func__);
1939 err = -EINVAL;
1940 }
1941 if (err)
1942 return err;
1943
1944
1945 if (ctx->cipher.alg != CIPHER_ALG_RC4) {
1946 memcpy(ctx->enckey, key, keylen);
1947 ctx->enckeylen = keylen;
1948 }
1949
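	/*
	 * XTS mode: the SPU expects the two half-keys in the opposite order
	 * from the way the crypto API supplies them, so store key2 first.
	 */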
1950 if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1951 (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1952 unsigned int xts_keylen = keylen / 2;
1953
1954 memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1955 memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1956 }
1957
1958 if (spu->spu_type == SPU_TYPE_SPUM)
1959 alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1960 else if (spu->spu_type == SPU_TYPE_SPU2)
1961 alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1962 memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1963 cipher_parms.iv_buf = NULL;
1964 cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
1965 flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1966
1967 cipher_parms.alg = ctx->cipher.alg;
1968 cipher_parms.mode = ctx->cipher.mode;
1969 cipher_parms.type = ctx->cipher_type;
1970 cipher_parms.key_buf = ctx->enckey;
1971 cipher_parms.key_len = ctx->enckeylen;
1972
1973
1974 memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1975 ctx->spu_req_hdr_len =
1976 spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1977 &cipher_parms);
1978
1979 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1980 ctx->enckeylen,
1981 false);
1982
1983 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1984
1985 return 0;
1986}
1987
1988static int ablkcipher_encrypt(struct ablkcipher_request *req)
1989{
1990 flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
1991
1992 return ablkcipher_enqueue(req, true);
1993}
1994
1995static int ablkcipher_decrypt(struct ablkcipher_request *req)
1996{
1997 flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
1998 return ablkcipher_enqueue(req, false);
1999}
2000
2001static int ahash_enqueue(struct ahash_request *req)
2002{
2003 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2004 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2005 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2006 int err = 0;
2007 const char *alg_name;
2008
2009 flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
2010
2011 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2012 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2013 rctx->parent = &req->base;
2014 rctx->ctx = ctx;
2015 rctx->bd_suppress = true;
2016 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2017
2018
2019 rctx->src_sg = req->src;
2020 rctx->src_skip = 0;
2021 rctx->src_nents = 0;
2022 rctx->dst_sg = NULL;
2023 rctx->dst_skip = 0;
2024 rctx->dst_nents = 0;
2025
2026
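	/*
	 * SPU2 hardware cannot hash a zero-length message, so compute the
	 * final hash of an empty request synchronously in software.
	 */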
2027 if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
2028 (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
2029 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2030 flow_log("Doing %sfinal %s zero-len hash request in software\n",
2031 rctx->is_final ? "" : "non-", alg_name);
2032 err = do_shash((unsigned char *)alg_name, req->result,
2033 NULL, 0, NULL, 0, ctx->authkey,
2034 ctx->authkeylen);
2035 if (err < 0)
2036 flow_log("Hash request failed with error %d\n", err);
2037 return err;
2038 }
2039
2040 rctx->chan_idx = select_channel();
2041
2042 err = handle_ahash_req(rctx);
2043 if (err != -EINPROGRESS)
2044
2045 spu_chunk_cleanup(rctx);
2046
2047 if (err == -EAGAIN)
2048
2049
2050
2051
2052 err = 0;
2053
2054 return err;
2055}
2056
2057static int __ahash_init(struct ahash_request *req)
2058{
2059 struct spu_hw *spu = &iproc_priv.spu;
2060 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2061 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2062 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2063
2064 flow_log("%s()\n", __func__);
2065
2066
2067 rctx->hash_carry_len = 0;
2068 rctx->is_final = 0;
2069
2070 rctx->total_todo = 0;
2071 rctx->src_sent = 0;
2072 rctx->total_sent = 0;
2073 rctx->total_received = 0;
2074
2075 ctx->digestsize = crypto_ahash_digestsize(tfm);
2076
2077 WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
2078
2079 rctx->is_sw_hmac = false;
2080
2081 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
2082 true);
2083
2084 return 0;
2085}

/*
 * spu_no_incr_hash() - Determine whether incremental hashing must be done in
 * software for this transform.
 * @ctx:  Crypto session context
 *
 * SPU-M supports incremental hashing except for AES-XCBC, where there is no
 * way to save and restore the intermediate state. SPU2 does not support
 * incremental hashing at all. In those cases the driver falls back to a
 * synchronous software hash (shash) for update/final sequences.
 *
 * Return: true if software hashing must be used
 */
2100bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2101{
2102 struct spu_hw *spu = &iproc_priv.spu;
2103
2104 if (spu->spu_type == SPU_TYPE_SPU2)
2105 return true;
2106
2107 if ((ctx->auth.alg == HASH_ALG_AES) &&
2108 (ctx->auth.mode == HASH_MODE_XCBC))
2109 return true;
2110
2111
2112 return false;
2113}
2114
2115static int ahash_init(struct ahash_request *req)
2116{
2117 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2118 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2119 const char *alg_name;
2120 struct crypto_shash *hash;
2121 int ret;
2122 gfp_t gfp;
2123
2124 if (spu_no_incr_hash(ctx)) {
2125
2126
2127
2128
2129
2130 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2131 hash = crypto_alloc_shash(alg_name, 0, 0);
2132 if (IS_ERR(hash)) {
2133 ret = PTR_ERR(hash);
2134 goto err;
2135 }
2136
2137 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2138 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2139 ctx->shash = kmalloc(sizeof(*ctx->shash) +
2140 crypto_shash_descsize(hash), gfp);
2141 if (!ctx->shash) {
2142 ret = -ENOMEM;
2143 goto err_hash;
2144 }
2145 ctx->shash->tfm = hash;
2146 ctx->shash->flags = 0;
2147
2148
2149 if (ctx->authkeylen > 0) {
2150 ret = crypto_shash_setkey(hash, ctx->authkey,
2151 ctx->authkeylen);
2152 if (ret)
2153 goto err_shash;
2154 }
2155
2156
2157 ret = crypto_shash_init(ctx->shash);
2158 if (ret)
2159 goto err_shash;
2160 } else {
2161
2162 ret = __ahash_init(req);
2163 }
2164
2165 return ret;
2166
2167err_shash:
2168 kfree(ctx->shash);
2169err_hash:
2170 crypto_free_shash(hash);
2171err:
2172 return ret;
2173}
2174
2175static int __ahash_update(struct ahash_request *req)
2176{
2177 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2178
2179 flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2180
2181 if (!req->nbytes)
2182 return 0;
2183 rctx->total_todo += req->nbytes;
2184 rctx->src_sent = 0;
2185
2186 return ahash_enqueue(req);
2187}
2188
2189static int ahash_update(struct ahash_request *req)
2190{
2191 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2192 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2193 u8 *tmpbuf;
2194 int ret;
2195 int nents;
2196 gfp_t gfp;
2197
2198 if (spu_no_incr_hash(ctx)) {
2199
2200
2201
2202
2203
2204 if (req->src)
2205 nents = sg_nents(req->src);
2206 else
2207 return -EINVAL;
2208
2209
2210 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2211 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2212 tmpbuf = kmalloc(req->nbytes, gfp);
2213 if (!tmpbuf)
2214 return -ENOMEM;
2215
2216 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2217 req->nbytes) {
2218 kfree(tmpbuf);
2219 return -EINVAL;
2220 }
2221
2222
2223 ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2224 kfree(tmpbuf);
2225 } else {
2226
2227 ret = __ahash_update(req);
2228 }
2229
2230 return ret;
2231}
2232
2233static int __ahash_final(struct ahash_request *req)
2234{
2235 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2236
2237 flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2238
2239 rctx->is_final = 1;
2240
2241 return ahash_enqueue(req);
2242}
2243
2244static int ahash_final(struct ahash_request *req)
2245{
2246 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2247 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2248 int ret;
2249
2250 if (spu_no_incr_hash(ctx)) {
2251
2252
2253
2254
2255
2256 ret = crypto_shash_final(ctx->shash, req->result);
2257
2258
2259 crypto_free_shash(ctx->shash->tfm);
2260 kfree(ctx->shash);
2261
2262 } else {
2263
2264 ret = __ahash_final(req);
2265 }
2266
2267 return ret;
2268}
2269
2270static int __ahash_finup(struct ahash_request *req)
2271{
2272 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2273
2274 flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2275
2276 rctx->total_todo += req->nbytes;
2277 rctx->src_sent = 0;
2278 rctx->is_final = 1;
2279
2280 return ahash_enqueue(req);
2281}
2282
2283static int ahash_finup(struct ahash_request *req)
2284{
2285 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2286 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2287 u8 *tmpbuf;
2288 int ret;
2289 int nents;
2290 gfp_t gfp;
2291
2292 if (spu_no_incr_hash(ctx)) {
2293
2294
2295
2296
2297
2298 if (req->src) {
2299 nents = sg_nents(req->src);
2300 } else {
2301 ret = -EINVAL;
2302 goto ahash_finup_exit;
2303 }
2304
2305
2306 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2307 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2308 tmpbuf = kmalloc(req->nbytes, gfp);
2309 if (!tmpbuf) {
2310 ret = -ENOMEM;
2311 goto ahash_finup_exit;
2312 }
2313
2314 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2315 req->nbytes) {
2316 ret = -EINVAL;
2317 goto ahash_finup_free;
2318 }
2319
2320
2321 ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2322 req->result);
2323 } else {
2324
2325 return __ahash_finup(req);
2326 }
2327ahash_finup_free:
2328 kfree(tmpbuf);
2329
2330ahash_finup_exit:
2331
2332 crypto_free_shash(ctx->shash->tfm);
2333 kfree(ctx->shash);
2334 return ret;
2335}
2336
2337static int ahash_digest(struct ahash_request *req)
2338{
2339 int err = 0;
2340
2341 flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2342
2343
2344 err = __ahash_init(req);
2345 if (!err)
2346 err = __ahash_finup(req);
2347
2348 return err;
2349}
2350
2351static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2352 unsigned int keylen)
2353{
2354 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2355
2356 flow_log("%s() ahash:%p key:%p keylen:%u\n",
2357 __func__, ahash, key, keylen);
2358 flow_dump(" key: ", key, keylen);
2359
2360 if (ctx->auth.alg == HASH_ALG_AES) {
2361 switch (keylen) {
2362 case AES_KEYSIZE_128:
2363 ctx->cipher_type = CIPHER_TYPE_AES128;
2364 break;
2365 case AES_KEYSIZE_192:
2366 ctx->cipher_type = CIPHER_TYPE_AES192;
2367 break;
2368 case AES_KEYSIZE_256:
2369 ctx->cipher_type = CIPHER_TYPE_AES256;
2370 break;
2371 default:
2372 pr_err("%s() Error: Invalid key length\n", __func__);
2373 return -EINVAL;
2374 }
2375 } else {
2376 pr_err("%s() Error: unknown hash alg\n", __func__);
2377 return -EINVAL;
2378 }
2379 memcpy(ctx->authkey, key, keylen);
2380 ctx->authkeylen = keylen;
2381
2382 return 0;
2383}
2384
2385static int ahash_export(struct ahash_request *req, void *out)
2386{
2387 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2388 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2389
2390 spu_exp->total_todo = rctx->total_todo;
2391 spu_exp->total_sent = rctx->total_sent;
2392 spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2393 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2394 spu_exp->hash_carry_len = rctx->hash_carry_len;
2395 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2396
2397 return 0;
2398}
2399
2400static int ahash_import(struct ahash_request *req, const void *in)
2401{
2402 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2403 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2404
2405 rctx->total_todo = spu_exp->total_todo;
2406 rctx->total_sent = spu_exp->total_sent;
2407 rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2408 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2409 rctx->hash_carry_len = spu_exp->hash_carry_len;
2410 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2411
2412 return 0;
2413}
2414
2415static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2416 unsigned int keylen)
2417{
2418 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2419 unsigned int blocksize =
2420 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2421 unsigned int digestsize = crypto_ahash_digestsize(ahash);
2422 unsigned int index;
2423 int rc;
2424
2425 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2426 __func__, ahash, key, keylen, blocksize, digestsize);
2427 flow_dump(" key: ", key, keylen);
2428
2429 if (keylen > blocksize) {
2430 switch (ctx->auth.alg) {
2431 case HASH_ALG_MD5:
2432 rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2433 0, NULL, 0);
2434 break;
2435 case HASH_ALG_SHA1:
2436 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2437 0, NULL, 0);
2438 break;
2439 case HASH_ALG_SHA224:
2440 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2441 0, NULL, 0);
2442 break;
2443 case HASH_ALG_SHA256:
2444 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2445 0, NULL, 0);
2446 break;
2447 case HASH_ALG_SHA384:
2448 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2449 0, NULL, 0);
2450 break;
2451 case HASH_ALG_SHA512:
2452 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2453 0, NULL, 0);
2454 break;
2455 case HASH_ALG_SHA3_224:
2456 rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2457 NULL, 0, NULL, 0);
2458 break;
2459 case HASH_ALG_SHA3_256:
2460 rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2461 NULL, 0, NULL, 0);
2462 break;
2463 case HASH_ALG_SHA3_384:
2464 rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2465 NULL, 0, NULL, 0);
2466 break;
2467 case HASH_ALG_SHA3_512:
2468 rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2469 NULL, 0, NULL, 0);
2470 break;
2471 default:
2472 pr_err("%s() Error: unknown hash alg\n", __func__);
2473 return -EINVAL;
2474 }
2475 if (rc < 0) {
2476 pr_err("%s() Error %d computing shash for %s\n",
2477 __func__, rc, hash_alg_name[ctx->auth.alg]);
2478 return rc;
2479 }
2480 ctx->authkeylen = digestsize;
2481
		flow_log("  keylen > blocksize... hashed\n");
2483 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2484 } else {
2485 memcpy(ctx->authkey, key, keylen);
2486 ctx->authkeylen = keylen;
2487 }
2488
	/*
	 * For SPU-M, precompute the HMAC ipad and opad blocks here; the
	 * outer hash is then finished in software (see ahash_hmac_digest()),
	 * so the raw key itself is not passed to the hardware.
	 */
2494 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2495 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2496 memset(ctx->ipad + ctx->authkeylen, 0,
2497 blocksize - ctx->authkeylen);
2498 ctx->authkeylen = 0;
2499 memcpy(ctx->opad, ctx->ipad, blocksize);
2500
2501 for (index = 0; index < blocksize; index++) {
2502 ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2503 ctx->opad[index] ^= HMAC_OPAD_VALUE;
2504 }
2505
2506 flow_dump(" ipad: ", ctx->ipad, blocksize);
2507 flow_dump(" opad: ", ctx->opad, blocksize);
2508 }
2509 ctx->digestsize = digestsize;
2510 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2511
2512 return 0;
2513}
2514
2515static int ahash_hmac_init(struct ahash_request *req)
2516{
2517 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2518 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2519 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2520 unsigned int blocksize =
2521 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2522
2523 flow_log("ahash_hmac_init()\n");
2524
	/* Start with a normal hash init of the request context */
2526 ahash_init(req);
2527
2528 if (!spu_no_incr_hash(ctx)) {
		/* Seed the inner hash with the ipad block; the outer HMAC
		 * is finished in software.
		 */
2530 rctx->is_sw_hmac = true;
2531 ctx->auth.mode = HASH_MODE_HASH;
2532
2533 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2534 rctx->hash_carry_len = blocksize;
2535 rctx->total_todo += blocksize;
2536 }
2537
2538 return 0;
2539}
2540
2541static int ahash_hmac_update(struct ahash_request *req)
2542{
2543 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2544
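	/* Nothing to hash for a zero-length update */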
2545 if (!req->nbytes)
2546 return 0;
2547
2548 return ahash_update(req);
2549}
2550
2551static int ahash_hmac_final(struct ahash_request *req)
2552{
2553 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2554
2555 return ahash_final(req);
2556}
2557
2558static int ahash_hmac_finup(struct ahash_request *req)
2559{
	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2561
2562 return ahash_finup(req);
2563}
2564
2565static int ahash_hmac_digest(struct ahash_request *req)
2566{
2567 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2568 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2569 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2570 unsigned int blocksize =
2571 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2572
2573 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2574
	/* Initialize, then hash the whole request in a single finup */
2576 __ahash_init(req);
2577
2578 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
		/*
		 * SPU2 can perform the full HMAC in hardware, so there is no
		 * need to build ipad/opad or carry the inner hash in
		 * software.
		 */
2587 rctx->is_sw_hmac = false;
2588 ctx->auth.mode = HASH_MODE_HMAC;
2589 } else {
2590 rctx->is_sw_hmac = true;
2591 ctx->auth.mode = HASH_MODE_HASH;
2592
2593 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2594 rctx->hash_carry_len = blocksize;
2595 rctx->total_todo += blocksize;
2596 }
2597
2598 return __ahash_finup(req);
2599}
2600
2601
2602
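/*
 * aead_need_fallback() - Determine whether an AEAD request must be handed to
 * the software fallback rather than the SPU hardware.
 * Return: true if the fallback should be used
 */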
2603static int aead_need_fallback(struct aead_request *req)
2604{
2605 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2606 struct spu_hw *spu = &iproc_priv.spu;
2607 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2608 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2609 u32 payload_len;
2610
	/*
	 * The SPU hardware cannot handle GCM/CCM requests in which both the
	 * payload and the AAD have zero length, so use the fallback there.
	 */
2615 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2616 (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2617 (req->assoclen == 0)) {
2618 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2619 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2620 flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2621 return 1;
2622 }
2623 }
2624
	/* SPU-M only supports CCM digest sizes of 8, 12 and 16 bytes */
2626 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2627 (spu->spu_type == SPU_TYPE_SPUM) &&
2628 (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2629 (ctx->digestsize != 16)) {
2630 flow_log("%s() AES CCM needs fallback for digest size %d\n",
2631 __func__, ctx->digestsize);
2632 return 1;
2633 }
2634
	/*
	 * SPU-M on the NSP subtype cannot handle AES-CCM with zero-length
	 * AAD, so use the fallback in that case too.
	 */
2639 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2640 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2641 (req->assoclen == 0)) {
2642 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2643 __func__);
2644 return 1;
2645 }
2646
2647 payload_len = req->cryptlen;
2648 if (spu->spu_type == SPU_TYPE_SPUM)
2649 payload_len += req->assoclen;
2650
2651 flow_log("%s() payload len: %u\n", __func__, payload_len);
2652
2653 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2654 return 0;
2655 else
2656 return payload_len > ctx->max_payload;
2657}
2658
2659static void aead_complete(struct crypto_async_request *areq, int err)
2660{
2661 struct aead_request *req =
2662 container_of(areq, struct aead_request, base);
2663 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2664 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2665
2666 flow_log("%s() err:%d\n", __func__, err);
2667
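	/* Restore the original tfm and completion callback, then complete */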
2668 areq->tfm = crypto_aead_tfm(aead);
2669
2670 areq->complete = rctx->old_complete;
2671 areq->data = rctx->old_data;
2672
2673 areq->complete(areq, err);
2674}
2675
2676static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2677{
2678 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2679 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2680 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2681 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2682 int err;
2683 u32 req_flags;
2684
2685 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2686
2687 if (ctx->fallback_cipher) {
		/* Switch the request over to the fallback tfm */
2689 rctx->old_tfm = tfm;
2690 aead_request_set_tfm(req, ctx->fallback_cipher);
2691
		/*
		 * Save the original completion callback and substitute our
		 * own so the tfm can be restored when the fallback finishes.
		 */
2695 rctx->old_complete = req->base.complete;
2696 rctx->old_data = req->base.data;
2697 req_flags = aead_request_flags(req);
2698 aead_request_set_callback(req, req_flags, aead_complete, req);
2699 err = is_encrypt ? crypto_aead_encrypt(req) :
2700 crypto_aead_decrypt(req);
2701
2702 if (err == 0) {
			/*
			 * The fallback completed synchronously, so restore
			 * the original callback and tfm here.
			 */
2707 aead_request_set_callback(req, req_flags,
2708 rctx->old_complete, req);
2709 req->base.data = rctx->old_data;
2710 aead_request_set_tfm(req, aead);
2711 flow_log("%s() fallback completed successfully\n\n",
2712 __func__);
2713 }
2714 } else {
2715 err = -EINVAL;
2716 }
2717
2718 return err;
2719}
2720
2721static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2722{
2723 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2724 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2725 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2726 int err;
2727
2728 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2729
2730 if (req->assoclen > MAX_ASSOC_SIZE) {
2731 pr_err
2732 ("%s() Error: associated data too long. (%u > %u bytes)\n",
2733 __func__, req->assoclen, MAX_ASSOC_SIZE);
2734 return -EINVAL;
2735 }
2736
2737 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2738 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2739 rctx->parent = &req->base;
2740 rctx->is_encrypt = is_encrypt;
2741 rctx->bd_suppress = false;
2742 rctx->total_todo = req->cryptlen;
2743 rctx->src_sent = 0;
2744 rctx->total_sent = 0;
2745 rctx->total_received = 0;
2746 rctx->is_sw_hmac = false;
2747 rctx->ctx = ctx;
2748 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2749
	/* Associated data sits at the start of the source scatterlist */
2751 rctx->assoc = req->src;
2752
	/*
	 * Find where the payload begins in the source scatterlist, i.e. just
	 * past the associated data, which may end in the middle of a buffer.
	 */
2758 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2759 &rctx->src_skip) < 0) {
2760 pr_err("%s() Error: Unable to find start of src data\n",
2761 __func__);
2762 return -EINVAL;
2763 }
2764
2765 rctx->src_nents = 0;
2766 rctx->dst_nents = 0;
2767 if (req->dst == req->src) {
2768 rctx->dst_sg = rctx->src_sg;
2769 rctx->dst_skip = rctx->src_skip;
2770 } else {
		/*
		 * req->dst also starts with room for the associated data, so
		 * point dst_sg just past the assoclen offset as well.
		 */
2776 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2777 &rctx->dst_skip) < 0) {
2778 pr_err("%s() Error: Unable to find start of dst data\n",
2779 __func__);
2780 return -EINVAL;
2781 }
2782 }
2783
2784 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2785 ctx->cipher.mode == CIPHER_MODE_CTR ||
2786 ctx->cipher.mode == CIPHER_MODE_OFB ||
2787 ctx->cipher.mode == CIPHER_MODE_XTS ||
2788 ctx->cipher.mode == CIPHER_MODE_GCM) {
2789 rctx->iv_ctr_len =
2790 ctx->salt_len +
2791 crypto_aead_ivsize(crypto_aead_reqtfm(req));
2792 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2793 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2794 } else {
2795 rctx->iv_ctr_len = 0;
2796 }
2797
2798 rctx->hash_carry_len = 0;
2799
2800 flow_log(" src sg: %p\n", req->src);
2801 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2802 rctx->src_sg, rctx->src_skip);
2803 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2804 flow_log(" dst sg: %p\n", req->dst);
2805 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2806 rctx->dst_sg, rctx->dst_skip);
2807 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2808 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2809 flow_log(" authkeylen:%u\n", ctx->authkeylen);
2810 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2811
2812 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2813 flow_log(" max_payload infinite");
2814 else
2815 flow_log(" max_payload: %u\n", ctx->max_payload);
2816
2817 if (unlikely(aead_need_fallback(req)))
2818 return aead_do_fallback(req, is_encrypt);
2819
	/*
	 * Build the full IV/counter block: the stored salt (if any) followed
	 * by the IV supplied with the request.
	 */
2824 if (rctx->iv_ctr_len) {
2825 if (ctx->salt_len)
2826 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2827 ctx->salt, ctx->salt_len);
2828 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2829 req->iv,
2830 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2831 }
2832
2833 rctx->chan_idx = select_channel();
2834 err = handle_aead_req(rctx);
2835 if (err != -EINPROGRESS)
		/* Synchronous completion (or error): release chunk resources */
2837 spu_chunk_cleanup(rctx);
2838
2839 return err;
2840}
2841
2842static int aead_authenc_setkey(struct crypto_aead *cipher,
2843 const u8 *key, unsigned int keylen)
2844{
2845 struct spu_hw *spu = &iproc_priv.spu;
2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2848 struct crypto_authenc_keys keys;
2849 int ret;
2850
2851 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2852 keylen);
2853 flow_dump(" key: ", key, keylen);
2854
2855 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2856 if (ret)
2857 goto badkey;
2858
2859 if (keys.enckeylen > MAX_KEY_SIZE ||
2860 keys.authkeylen > MAX_KEY_SIZE)
2861 goto badkey;
2862
2863 ctx->enckeylen = keys.enckeylen;
2864 ctx->authkeylen = keys.authkeylen;
2865
2866 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2867
2868 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2869 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2870
2871 switch (ctx->alg->cipher_info.alg) {
2872 case CIPHER_ALG_DES:
2873 if (ctx->enckeylen == DES_KEY_SIZE) {
2874 u32 tmp[DES_EXPKEY_WORDS];
2875 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2876
2877 if (des_ekey(tmp, keys.enckey) == 0) {
2878 if (crypto_aead_get_flags(cipher) &
2879 CRYPTO_TFM_REQ_WEAK_KEY) {
2880 crypto_aead_set_flags(cipher, flags);
2881 return -EINVAL;
2882 }
2883 }
2884
2885 ctx->cipher_type = CIPHER_TYPE_DES;
2886 } else {
2887 goto badkey;
2888 }
2889 break;
2890 case CIPHER_ALG_3DES:
2891 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2892 const u32 *K = (const u32 *)keys.enckey;
2893 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2894
2895 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
2896 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
2897 crypto_aead_set_flags(cipher, flags);
2898 return -EINVAL;
2899 }
2900
2901 ctx->cipher_type = CIPHER_TYPE_3DES;
2902 } else {
2903 crypto_aead_set_flags(cipher,
2904 CRYPTO_TFM_RES_BAD_KEY_LEN);
2905 return -EINVAL;
2906 }
2907 break;
2908 case CIPHER_ALG_AES:
2909 switch (ctx->enckeylen) {
2910 case AES_KEYSIZE_128:
2911 ctx->cipher_type = CIPHER_TYPE_AES128;
2912 break;
2913 case AES_KEYSIZE_192:
2914 ctx->cipher_type = CIPHER_TYPE_AES192;
2915 break;
2916 case AES_KEYSIZE_256:
2917 ctx->cipher_type = CIPHER_TYPE_AES256;
2918 break;
2919 default:
2920 goto badkey;
2921 }
2922 break;
2923 case CIPHER_ALG_RC4:
2924 ctx->cipher_type = CIPHER_TYPE_INIT;
2925 break;
2926 default:
2927 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2928 return -EINVAL;
2929 }
2930
2931 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2932 ctx->authkeylen);
2933 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2934 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2935
	/* Also key the fallback cipher, in case it is needed later */
2937 if (ctx->fallback_cipher) {
2938 flow_log(" running fallback setkey()\n");
2939
2940 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2941 ctx->fallback_cipher->base.crt_flags |=
2942 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2943 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2944 if (ret) {
2945 flow_log(" fallback setkey() returned:%d\n", ret);
2946 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2947 tfm->crt_flags |=
2948 (ctx->fallback_cipher->base.crt_flags &
2949 CRYPTO_TFM_RES_MASK);
2950 }
2951 }
2952
2953 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2954 ctx->enckeylen,
2955 false);
2956
2957 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2958
2959 return ret;
2960
2961badkey:
2962 ctx->enckeylen = 0;
2963 ctx->authkeylen = 0;
2964 ctx->digestsize = 0;
2965
2966 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2967 return -EINVAL;
2968}
2969
2970static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2971 const u8 *key, unsigned int keylen)
2972{
2973 struct spu_hw *spu = &iproc_priv.spu;
2974 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2975 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2976
2977 int ret = 0;
2978
2979 flow_log("%s() keylen:%u\n", __func__, keylen);
2980 flow_dump(" key: ", key, keylen);
2981
2982 if (!ctx->is_esp)
2983 ctx->digestsize = keylen;
2984
2985 ctx->enckeylen = keylen;
2986 ctx->authkeylen = 0;
2987 memcpy(ctx->enckey, key, ctx->enckeylen);
2988
2989 switch (ctx->enckeylen) {
2990 case AES_KEYSIZE_128:
2991 ctx->cipher_type = CIPHER_TYPE_AES128;
2992 break;
2993 case AES_KEYSIZE_192:
2994 ctx->cipher_type = CIPHER_TYPE_AES192;
2995 break;
2996 case AES_KEYSIZE_256:
2997 ctx->cipher_type = CIPHER_TYPE_AES256;
2998 break;
2999 default:
3000 goto badkey;
3001 }
3002
3003 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3004 ctx->authkeylen);
3005 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
3006 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
3007
	/* Also key the fallback cipher, in case it is needed later */
3009 if (ctx->fallback_cipher) {
3010 flow_log(" running fallback setkey()\n");
3011
3012 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
3013 ctx->fallback_cipher->base.crt_flags |=
3014 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
3015 ret = crypto_aead_setkey(ctx->fallback_cipher, key,
3016 keylen + ctx->salt_len);
3017 if (ret) {
3018 flow_log(" fallback setkey() returned:%d\n", ret);
3019 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
3020 tfm->crt_flags |=
3021 (ctx->fallback_cipher->base.crt_flags &
3022 CRYPTO_TFM_RES_MASK);
3023 }
3024 }
3025
3026 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
3027 ctx->enckeylen,
3028 false);
3029
3030 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
3031
3032 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3033 ctx->authkeylen);
3034
3035 return ret;
3036
3037badkey:
3038 ctx->enckeylen = 0;
3039 ctx->authkeylen = 0;
3040 ctx->digestsize = 0;
3041
3042 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
3043 return -EINVAL;
3044}
3045
/**
 * aead_gcm_esp_setkey() - setkey operation for the ESP (rfc4106) variant of
 * GCM AES.
 * @cipher: AEAD transform
 * @key:    Key material followed by GCM_ESP_SALT_SIZE bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * The salt is split off and saved so it can be prepended to the IV on each
 * request, and the digest size is fixed at GCM_ESP_DIGESTSIZE.
 *
 * Return: Value returned by the generic GCM/CCM setkey.
 */
3057static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3058 const u8 *key, unsigned int keylen)
3059{
3060 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3061
3062 flow_log("%s\n", __func__);
3063 ctx->salt_len = GCM_ESP_SALT_SIZE;
3064 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3065 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3066 keylen -= GCM_ESP_SALT_SIZE;
3067 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3068 ctx->is_esp = true;
3069 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3070
3071 return aead_gcm_ccm_setkey(cipher, key, keylen);
3072}
3073
/**
 * rfc4543_gcm_esp_setkey() - setkey operation for the rfc4543 (AES-GMAC) ESP
 * variant of GCM AES.
 * @cipher: AEAD transform
 * @key:    Key material followed by GCM_ESP_SALT_SIZE bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Like aead_gcm_esp_setkey(), but also marks the context as rfc4543 so the
 * request is handled as GMAC (authentication only).
 *
 * Return: Value returned by the generic GCM/CCM setkey.
 */
3085static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3086 const u8 *key, unsigned int keylen)
3087{
3088 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3089
3090 flow_log("%s\n", __func__);
3091 ctx->salt_len = GCM_ESP_SALT_SIZE;
3092 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3093 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3094 keylen -= GCM_ESP_SALT_SIZE;
3095 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3096 ctx->is_esp = true;
3097 ctx->is_rfc4543 = true;
3098 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3099
3100 return aead_gcm_ccm_setkey(cipher, key, keylen);
3101}
3102
/**
 * aead_ccm_esp_setkey() - setkey operation for the ESP (rfc4309) variant of
 * CCM AES.
 * @cipher: AEAD transform
 * @key:    Key material followed by CCM_ESP_SALT_SIZE bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * The salt is split off and saved so it can be prepended to the IV on each
 * request.
 *
 * Return: Value returned by the generic GCM/CCM setkey.
 */
3114static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3115 const u8 *key, unsigned int keylen)
3116{
3117 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3118
3119 flow_log("%s\n", __func__);
3120 ctx->salt_len = CCM_ESP_SALT_SIZE;
3121 ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3122 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3123 keylen -= CCM_ESP_SALT_SIZE;
3124 ctx->is_esp = true;
3125 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3126
3127 return aead_gcm_ccm_setkey(cipher, key, keylen);
3128}
3129
3130static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3131{
3132 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3133 int ret = 0;
3134
3135 flow_log("%s() authkeylen:%u authsize:%u\n",
3136 __func__, ctx->authkeylen, authsize);
3137
3138 ctx->digestsize = authsize;
3139
	/* Propagate the digest size to the fallback cipher, if present */
3141 if (ctx->fallback_cipher) {
3142 flow_log(" running fallback setauth()\n");
3143
3144 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3145 if (ret)
3146 flow_log(" fallback setauth() returned:%d\n", ret);
3147 }
3148
3149 return ret;
3150}
3151
3152static int aead_encrypt(struct aead_request *req)
3153{
3154 flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3155 req->cryptlen);
3156 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3157 flow_log(" assoc_len:%u\n", req->assoclen);
3158
3159 return aead_enqueue(req, true);
3160}
3161
3162static int aead_decrypt(struct aead_request *req)
3163{
3164 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3165 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3166 flow_log(" assoc_len:%u\n", req->assoclen);
3167
3168 return aead_enqueue(req, false);
3169}
3170
3171
3172
3173static struct iproc_alg_s driver_algs[] = {
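	/* AEAD algorithms. */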
3174 {
3175 .type = CRYPTO_ALG_TYPE_AEAD,
3176 .alg.aead = {
3177 .base = {
3178 .cra_name = "gcm(aes)",
3179 .cra_driver_name = "gcm-aes-iproc",
3180 .cra_blocksize = AES_BLOCK_SIZE,
3181 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3182 },
3183 .setkey = aead_gcm_ccm_setkey,
3184 .ivsize = GCM_AES_IV_SIZE,
3185 .maxauthsize = AES_BLOCK_SIZE,
3186 },
3187 .cipher_info = {
3188 .alg = CIPHER_ALG_AES,
3189 .mode = CIPHER_MODE_GCM,
3190 },
3191 .auth_info = {
3192 .alg = HASH_ALG_AES,
3193 .mode = HASH_MODE_GCM,
3194 },
3195 .auth_first = 0,
3196 },
3197 {
3198 .type = CRYPTO_ALG_TYPE_AEAD,
3199 .alg.aead = {
3200 .base = {
3201 .cra_name = "ccm(aes)",
3202 .cra_driver_name = "ccm-aes-iproc",
3203 .cra_blocksize = AES_BLOCK_SIZE,
3204 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3205 },
3206 .setkey = aead_gcm_ccm_setkey,
3207 .ivsize = CCM_AES_IV_SIZE,
3208 .maxauthsize = AES_BLOCK_SIZE,
3209 },
3210 .cipher_info = {
3211 .alg = CIPHER_ALG_AES,
3212 .mode = CIPHER_MODE_CCM,
3213 },
3214 .auth_info = {
3215 .alg = HASH_ALG_AES,
3216 .mode = HASH_MODE_CCM,
3217 },
3218 .auth_first = 0,
3219 },
3220 {
3221 .type = CRYPTO_ALG_TYPE_AEAD,
3222 .alg.aead = {
3223 .base = {
3224 .cra_name = "rfc4106(gcm(aes))",
3225 .cra_driver_name = "gcm-aes-esp-iproc",
3226 .cra_blocksize = AES_BLOCK_SIZE,
3227 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3228 },
3229 .setkey = aead_gcm_esp_setkey,
3230 .ivsize = GCM_RFC4106_IV_SIZE,
3231 .maxauthsize = AES_BLOCK_SIZE,
3232 },
3233 .cipher_info = {
3234 .alg = CIPHER_ALG_AES,
3235 .mode = CIPHER_MODE_GCM,
3236 },
3237 .auth_info = {
3238 .alg = HASH_ALG_AES,
3239 .mode = HASH_MODE_GCM,
3240 },
3241 .auth_first = 0,
3242 },
3243 {
3244 .type = CRYPTO_ALG_TYPE_AEAD,
3245 .alg.aead = {
3246 .base = {
3247 .cra_name = "rfc4309(ccm(aes))",
3248 .cra_driver_name = "ccm-aes-esp-iproc",
3249 .cra_blocksize = AES_BLOCK_SIZE,
3250 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3251 },
3252 .setkey = aead_ccm_esp_setkey,
3253 .ivsize = CCM_AES_IV_SIZE,
3254 .maxauthsize = AES_BLOCK_SIZE,
3255 },
3256 .cipher_info = {
3257 .alg = CIPHER_ALG_AES,
3258 .mode = CIPHER_MODE_CCM,
3259 },
3260 .auth_info = {
3261 .alg = HASH_ALG_AES,
3262 .mode = HASH_MODE_CCM,
3263 },
3264 .auth_first = 0,
3265 },
3266 {
3267 .type = CRYPTO_ALG_TYPE_AEAD,
3268 .alg.aead = {
3269 .base = {
3270 .cra_name = "rfc4543(gcm(aes))",
3271 .cra_driver_name = "gmac-aes-esp-iproc",
3272 .cra_blocksize = AES_BLOCK_SIZE,
3273 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3274 },
3275 .setkey = rfc4543_gcm_esp_setkey,
3276 .ivsize = GCM_RFC4106_IV_SIZE,
3277 .maxauthsize = AES_BLOCK_SIZE,
3278 },
3279 .cipher_info = {
3280 .alg = CIPHER_ALG_AES,
3281 .mode = CIPHER_MODE_GCM,
3282 },
3283 .auth_info = {
3284 .alg = HASH_ALG_AES,
3285 .mode = HASH_MODE_GCM,
3286 },
3287 .auth_first = 0,
3288 },
3289 {
3290 .type = CRYPTO_ALG_TYPE_AEAD,
3291 .alg.aead = {
3292 .base = {
3293 .cra_name = "authenc(hmac(md5),cbc(aes))",
3294 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3295 .cra_blocksize = AES_BLOCK_SIZE,
3296 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3297 },
3298 .setkey = aead_authenc_setkey,
3299 .ivsize = AES_BLOCK_SIZE,
3300 .maxauthsize = MD5_DIGEST_SIZE,
3301 },
3302 .cipher_info = {
3303 .alg = CIPHER_ALG_AES,
3304 .mode = CIPHER_MODE_CBC,
3305 },
3306 .auth_info = {
3307 .alg = HASH_ALG_MD5,
3308 .mode = HASH_MODE_HMAC,
3309 },
3310 .auth_first = 0,
3311 },
3312 {
3313 .type = CRYPTO_ALG_TYPE_AEAD,
3314 .alg.aead = {
3315 .base = {
3316 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3317 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3318 .cra_blocksize = AES_BLOCK_SIZE,
3319 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3320 },
3321 .setkey = aead_authenc_setkey,
3322 .ivsize = AES_BLOCK_SIZE,
3323 .maxauthsize = SHA1_DIGEST_SIZE,
3324 },
3325 .cipher_info = {
3326 .alg = CIPHER_ALG_AES,
3327 .mode = CIPHER_MODE_CBC,
3328 },
3329 .auth_info = {
3330 .alg = HASH_ALG_SHA1,
3331 .mode = HASH_MODE_HMAC,
3332 },
3333 .auth_first = 0,
3334 },
3335 {
3336 .type = CRYPTO_ALG_TYPE_AEAD,
3337 .alg.aead = {
3338 .base = {
3339 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3340 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3341 .cra_blocksize = AES_BLOCK_SIZE,
3342 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3343 },
3344 .setkey = aead_authenc_setkey,
3345 .ivsize = AES_BLOCK_SIZE,
3346 .maxauthsize = SHA256_DIGEST_SIZE,
3347 },
3348 .cipher_info = {
3349 .alg = CIPHER_ALG_AES,
3350 .mode = CIPHER_MODE_CBC,
3351 },
3352 .auth_info = {
3353 .alg = HASH_ALG_SHA256,
3354 .mode = HASH_MODE_HMAC,
3355 },
3356 .auth_first = 0,
3357 },
3358 {
3359 .type = CRYPTO_ALG_TYPE_AEAD,
3360 .alg.aead = {
3361 .base = {
3362 .cra_name = "authenc(hmac(md5),cbc(des))",
3363 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3364 .cra_blocksize = DES_BLOCK_SIZE,
3365 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3366 },
3367 .setkey = aead_authenc_setkey,
3368 .ivsize = DES_BLOCK_SIZE,
3369 .maxauthsize = MD5_DIGEST_SIZE,
3370 },
3371 .cipher_info = {
3372 .alg = CIPHER_ALG_DES,
3373 .mode = CIPHER_MODE_CBC,
3374 },
3375 .auth_info = {
3376 .alg = HASH_ALG_MD5,
3377 .mode = HASH_MODE_HMAC,
3378 },
3379 .auth_first = 0,
3380 },
3381 {
3382 .type = CRYPTO_ALG_TYPE_AEAD,
3383 .alg.aead = {
3384 .base = {
3385 .cra_name = "authenc(hmac(sha1),cbc(des))",
3386 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3387 .cra_blocksize = DES_BLOCK_SIZE,
3388 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3389 },
3390 .setkey = aead_authenc_setkey,
3391 .ivsize = DES_BLOCK_SIZE,
3392 .maxauthsize = SHA1_DIGEST_SIZE,
3393 },
3394 .cipher_info = {
3395 .alg = CIPHER_ALG_DES,
3396 .mode = CIPHER_MODE_CBC,
3397 },
3398 .auth_info = {
3399 .alg = HASH_ALG_SHA1,
3400 .mode = HASH_MODE_HMAC,
3401 },
3402 .auth_first = 0,
3403 },
3404 {
3405 .type = CRYPTO_ALG_TYPE_AEAD,
3406 .alg.aead = {
3407 .base = {
3408 .cra_name = "authenc(hmac(sha224),cbc(des))",
3409 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3410 .cra_blocksize = DES_BLOCK_SIZE,
3411 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3412 },
3413 .setkey = aead_authenc_setkey,
3414 .ivsize = DES_BLOCK_SIZE,
3415 .maxauthsize = SHA224_DIGEST_SIZE,
3416 },
3417 .cipher_info = {
3418 .alg = CIPHER_ALG_DES,
3419 .mode = CIPHER_MODE_CBC,
3420 },
3421 .auth_info = {
3422 .alg = HASH_ALG_SHA224,
3423 .mode = HASH_MODE_HMAC,
3424 },
3425 .auth_first = 0,
3426 },
3427 {
3428 .type = CRYPTO_ALG_TYPE_AEAD,
3429 .alg.aead = {
3430 .base = {
3431 .cra_name = "authenc(hmac(sha256),cbc(des))",
3432 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3433 .cra_blocksize = DES_BLOCK_SIZE,
3434 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3435 },
3436 .setkey = aead_authenc_setkey,
3437 .ivsize = DES_BLOCK_SIZE,
3438 .maxauthsize = SHA256_DIGEST_SIZE,
3439 },
3440 .cipher_info = {
3441 .alg = CIPHER_ALG_DES,
3442 .mode = CIPHER_MODE_CBC,
3443 },
3444 .auth_info = {
3445 .alg = HASH_ALG_SHA256,
3446 .mode = HASH_MODE_HMAC,
3447 },
3448 .auth_first = 0,
3449 },
3450 {
3451 .type = CRYPTO_ALG_TYPE_AEAD,
3452 .alg.aead = {
3453 .base = {
3454 .cra_name = "authenc(hmac(sha384),cbc(des))",
3455 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3456 .cra_blocksize = DES_BLOCK_SIZE,
3457 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3458 },
3459 .setkey = aead_authenc_setkey,
3460 .ivsize = DES_BLOCK_SIZE,
3461 .maxauthsize = SHA384_DIGEST_SIZE,
3462 },
3463 .cipher_info = {
3464 .alg = CIPHER_ALG_DES,
3465 .mode = CIPHER_MODE_CBC,
3466 },
3467 .auth_info = {
3468 .alg = HASH_ALG_SHA384,
3469 .mode = HASH_MODE_HMAC,
3470 },
3471 .auth_first = 0,
3472 },
3473 {
3474 .type = CRYPTO_ALG_TYPE_AEAD,
3475 .alg.aead = {
3476 .base = {
3477 .cra_name = "authenc(hmac(sha512),cbc(des))",
3478 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3479 .cra_blocksize = DES_BLOCK_SIZE,
3480 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3481 },
3482 .setkey = aead_authenc_setkey,
3483 .ivsize = DES_BLOCK_SIZE,
3484 .maxauthsize = SHA512_DIGEST_SIZE,
3485 },
3486 .cipher_info = {
3487 .alg = CIPHER_ALG_DES,
3488 .mode = CIPHER_MODE_CBC,
3489 },
3490 .auth_info = {
3491 .alg = HASH_ALG_SHA512,
3492 .mode = HASH_MODE_HMAC,
3493 },
3494 .auth_first = 0,
3495 },
3496 {
3497 .type = CRYPTO_ALG_TYPE_AEAD,
3498 .alg.aead = {
3499 .base = {
3500 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3501 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3502 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3503 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3504 },
3505 .setkey = aead_authenc_setkey,
3506 .ivsize = DES3_EDE_BLOCK_SIZE,
3507 .maxauthsize = MD5_DIGEST_SIZE,
3508 },
3509 .cipher_info = {
3510 .alg = CIPHER_ALG_3DES,
3511 .mode = CIPHER_MODE_CBC,
3512 },
3513 .auth_info = {
3514 .alg = HASH_ALG_MD5,
3515 .mode = HASH_MODE_HMAC,
3516 },
3517 .auth_first = 0,
3518 },
3519 {
3520 .type = CRYPTO_ALG_TYPE_AEAD,
3521 .alg.aead = {
3522 .base = {
3523 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3524 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3525 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3526 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3527 },
3528 .setkey = aead_authenc_setkey,
3529 .ivsize = DES3_EDE_BLOCK_SIZE,
3530 .maxauthsize = SHA1_DIGEST_SIZE,
3531 },
3532 .cipher_info = {
3533 .alg = CIPHER_ALG_3DES,
3534 .mode = CIPHER_MODE_CBC,
3535 },
3536 .auth_info = {
3537 .alg = HASH_ALG_SHA1,
3538 .mode = HASH_MODE_HMAC,
3539 },
3540 .auth_first = 0,
3541 },
3542 {
3543 .type = CRYPTO_ALG_TYPE_AEAD,
3544 .alg.aead = {
3545 .base = {
3546 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3547 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3548 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3549 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3550 },
3551 .setkey = aead_authenc_setkey,
3552 .ivsize = DES3_EDE_BLOCK_SIZE,
3553 .maxauthsize = SHA224_DIGEST_SIZE,
3554 },
3555 .cipher_info = {
3556 .alg = CIPHER_ALG_3DES,
3557 .mode = CIPHER_MODE_CBC,
3558 },
3559 .auth_info = {
3560 .alg = HASH_ALG_SHA224,
3561 .mode = HASH_MODE_HMAC,
3562 },
3563 .auth_first = 0,
3564 },
3565 {
3566 .type = CRYPTO_ALG_TYPE_AEAD,
3567 .alg.aead = {
3568 .base = {
3569 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3570 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3571 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3572 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3573 },
3574 .setkey = aead_authenc_setkey,
3575 .ivsize = DES3_EDE_BLOCK_SIZE,
3576 .maxauthsize = SHA256_DIGEST_SIZE,
3577 },
3578 .cipher_info = {
3579 .alg = CIPHER_ALG_3DES,
3580 .mode = CIPHER_MODE_CBC,
3581 },
3582 .auth_info = {
3583 .alg = HASH_ALG_SHA256,
3584 .mode = HASH_MODE_HMAC,
3585 },
3586 .auth_first = 0,
3587 },
3588 {
3589 .type = CRYPTO_ALG_TYPE_AEAD,
3590 .alg.aead = {
3591 .base = {
3592 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3593 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3594 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3595 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3596 },
3597 .setkey = aead_authenc_setkey,
3598 .ivsize = DES3_EDE_BLOCK_SIZE,
3599 .maxauthsize = SHA384_DIGEST_SIZE,
3600 },
3601 .cipher_info = {
3602 .alg = CIPHER_ALG_3DES,
3603 .mode = CIPHER_MODE_CBC,
3604 },
3605 .auth_info = {
3606 .alg = HASH_ALG_SHA384,
3607 .mode = HASH_MODE_HMAC,
3608 },
3609 .auth_first = 0,
3610 },
3611 {
3612 .type = CRYPTO_ALG_TYPE_AEAD,
3613 .alg.aead = {
3614 .base = {
3615 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3616 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3617 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3618 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3619 },
3620 .setkey = aead_authenc_setkey,
3621 .ivsize = DES3_EDE_BLOCK_SIZE,
3622 .maxauthsize = SHA512_DIGEST_SIZE,
3623 },
3624 .cipher_info = {
3625 .alg = CIPHER_ALG_3DES,
3626 .mode = CIPHER_MODE_CBC,
3627 },
3628 .auth_info = {
3629 .alg = HASH_ALG_SHA512,
3630 .mode = HASH_MODE_HMAC,
3631 },
3632 .auth_first = 0,
3633 },
3634
3635
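	/* ABLKCIPHER algorithms. */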
3636 {
3637 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3638 .alg.crypto = {
3639 .cra_name = "ecb(arc4)",
3640 .cra_driver_name = "ecb-arc4-iproc",
3641 .cra_blocksize = ARC4_BLOCK_SIZE,
3642 .cra_ablkcipher = {
3643 .min_keysize = ARC4_MIN_KEY_SIZE,
3644 .max_keysize = ARC4_MAX_KEY_SIZE,
3645 .ivsize = 0,
3646 }
3647 },
3648 .cipher_info = {
3649 .alg = CIPHER_ALG_RC4,
3650 .mode = CIPHER_MODE_NONE,
3651 },
3652 .auth_info = {
3653 .alg = HASH_ALG_NONE,
3654 .mode = HASH_MODE_NONE,
3655 },
3656 },
3657 {
3658 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3659 .alg.crypto = {
3660 .cra_name = "ofb(des)",
3661 .cra_driver_name = "ofb-des-iproc",
3662 .cra_blocksize = DES_BLOCK_SIZE,
3663 .cra_ablkcipher = {
3664 .min_keysize = DES_KEY_SIZE,
3665 .max_keysize = DES_KEY_SIZE,
3666 .ivsize = DES_BLOCK_SIZE,
3667 }
3668 },
3669 .cipher_info = {
3670 .alg = CIPHER_ALG_DES,
3671 .mode = CIPHER_MODE_OFB,
3672 },
3673 .auth_info = {
3674 .alg = HASH_ALG_NONE,
3675 .mode = HASH_MODE_NONE,
3676 },
3677 },
3678 {
3679 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3680 .alg.crypto = {
3681 .cra_name = "cbc(des)",
3682 .cra_driver_name = "cbc-des-iproc",
3683 .cra_blocksize = DES_BLOCK_SIZE,
3684 .cra_ablkcipher = {
3685 .min_keysize = DES_KEY_SIZE,
3686 .max_keysize = DES_KEY_SIZE,
3687 .ivsize = DES_BLOCK_SIZE,
3688 }
3689 },
3690 .cipher_info = {
3691 .alg = CIPHER_ALG_DES,
3692 .mode = CIPHER_MODE_CBC,
3693 },
3694 .auth_info = {
3695 .alg = HASH_ALG_NONE,
3696 .mode = HASH_MODE_NONE,
3697 },
3698 },
3699 {
3700 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3701 .alg.crypto = {
3702 .cra_name = "ecb(des)",
3703 .cra_driver_name = "ecb-des-iproc",
3704 .cra_blocksize = DES_BLOCK_SIZE,
3705 .cra_ablkcipher = {
3706 .min_keysize = DES_KEY_SIZE,
3707 .max_keysize = DES_KEY_SIZE,
3708 .ivsize = 0,
3709 }
3710 },
3711 .cipher_info = {
3712 .alg = CIPHER_ALG_DES,
3713 .mode = CIPHER_MODE_ECB,
3714 },
3715 .auth_info = {
3716 .alg = HASH_ALG_NONE,
3717 .mode = HASH_MODE_NONE,
3718 },
3719 },
3720 {
3721 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3722 .alg.crypto = {
3723 .cra_name = "ofb(des3_ede)",
3724 .cra_driver_name = "ofb-des3-iproc",
3725 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3726 .cra_ablkcipher = {
3727 .min_keysize = DES3_EDE_KEY_SIZE,
3728 .max_keysize = DES3_EDE_KEY_SIZE,
3729 .ivsize = DES3_EDE_BLOCK_SIZE,
3730 }
3731 },
3732 .cipher_info = {
3733 .alg = CIPHER_ALG_3DES,
3734 .mode = CIPHER_MODE_OFB,
3735 },
3736 .auth_info = {
3737 .alg = HASH_ALG_NONE,
3738 .mode = HASH_MODE_NONE,
3739 },
3740 },
3741 {
3742 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3743 .alg.crypto = {
3744 .cra_name = "cbc(des3_ede)",
3745 .cra_driver_name = "cbc-des3-iproc",
3746 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3747 .cra_ablkcipher = {
3748 .min_keysize = DES3_EDE_KEY_SIZE,
3749 .max_keysize = DES3_EDE_KEY_SIZE,
3750 .ivsize = DES3_EDE_BLOCK_SIZE,
3751 }
3752 },
3753 .cipher_info = {
3754 .alg = CIPHER_ALG_3DES,
3755 .mode = CIPHER_MODE_CBC,
3756 },
3757 .auth_info = {
3758 .alg = HASH_ALG_NONE,
3759 .mode = HASH_MODE_NONE,
3760 },
3761 },
3762 {
3763 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3764 .alg.crypto = {
3765 .cra_name = "ecb(des3_ede)",
3766 .cra_driver_name = "ecb-des3-iproc",
3767 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3768 .cra_ablkcipher = {
3769 .min_keysize = DES3_EDE_KEY_SIZE,
3770 .max_keysize = DES3_EDE_KEY_SIZE,
3771 .ivsize = 0,
3772 }
3773 },
3774 .cipher_info = {
3775 .alg = CIPHER_ALG_3DES,
3776 .mode = CIPHER_MODE_ECB,
3777 },
3778 .auth_info = {
3779 .alg = HASH_ALG_NONE,
3780 .mode = HASH_MODE_NONE,
3781 },
3782 },
3783 {
3784 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3785 .alg.crypto = {
3786 .cra_name = "ofb(aes)",
3787 .cra_driver_name = "ofb-aes-iproc",
3788 .cra_blocksize = AES_BLOCK_SIZE,
3789 .cra_ablkcipher = {
3790 .min_keysize = AES_MIN_KEY_SIZE,
3791 .max_keysize = AES_MAX_KEY_SIZE,
3792 .ivsize = AES_BLOCK_SIZE,
3793 }
3794 },
3795 .cipher_info = {
3796 .alg = CIPHER_ALG_AES,
3797 .mode = CIPHER_MODE_OFB,
3798 },
3799 .auth_info = {
3800 .alg = HASH_ALG_NONE,
3801 .mode = HASH_MODE_NONE,
3802 },
3803 },
3804 {
3805 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3806 .alg.crypto = {
3807 .cra_name = "cbc(aes)",
3808 .cra_driver_name = "cbc-aes-iproc",
3809 .cra_blocksize = AES_BLOCK_SIZE,
3810 .cra_ablkcipher = {
3811 .min_keysize = AES_MIN_KEY_SIZE,
3812 .max_keysize = AES_MAX_KEY_SIZE,
3813 .ivsize = AES_BLOCK_SIZE,
3814 }
3815 },
3816 .cipher_info = {
3817 .alg = CIPHER_ALG_AES,
3818 .mode = CIPHER_MODE_CBC,
3819 },
3820 .auth_info = {
3821 .alg = HASH_ALG_NONE,
3822 .mode = HASH_MODE_NONE,
3823 },
3824 },
3825 {
3826 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3827 .alg.crypto = {
3828 .cra_name = "ecb(aes)",
3829 .cra_driver_name = "ecb-aes-iproc",
3830 .cra_blocksize = AES_BLOCK_SIZE,
3831 .cra_ablkcipher = {
3832 .min_keysize = AES_MIN_KEY_SIZE,
3833 .max_keysize = AES_MAX_KEY_SIZE,
3834 .ivsize = 0,
3835 }
3836 },
3837 .cipher_info = {
3838 .alg = CIPHER_ALG_AES,
3839 .mode = CIPHER_MODE_ECB,
3840 },
3841 .auth_info = {
3842 .alg = HASH_ALG_NONE,
3843 .mode = HASH_MODE_NONE,
3844 },
3845 },
3846 {
3847 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3848 .alg.crypto = {
3849 .cra_name = "ctr(aes)",
3850 .cra_driver_name = "ctr-aes-iproc",
3851 .cra_blocksize = AES_BLOCK_SIZE,
3852 .cra_ablkcipher = {
3853 .min_keysize = AES_MIN_KEY_SIZE,
3854 .max_keysize = AES_MAX_KEY_SIZE,
3855 .ivsize = AES_BLOCK_SIZE,
3856 }
3857 },
3858 .cipher_info = {
3859 .alg = CIPHER_ALG_AES,
3860 .mode = CIPHER_MODE_CTR,
3861 },
3862 .auth_info = {
3863 .alg = HASH_ALG_NONE,
3864 .mode = HASH_MODE_NONE,
3865 },
3866 },
	{
3868 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3869 .alg.crypto = {
3870 .cra_name = "xts(aes)",
3871 .cra_driver_name = "xts-aes-iproc",
3872 .cra_blocksize = AES_BLOCK_SIZE,
3873 .cra_ablkcipher = {
3874 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3875 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3876 .ivsize = AES_BLOCK_SIZE,
3877 }
3878 },
3879 .cipher_info = {
3880 .alg = CIPHER_ALG_AES,
3881 .mode = CIPHER_MODE_XTS,
3882 },
3883 .auth_info = {
3884 .alg = HASH_ALG_NONE,
3885 .mode = HASH_MODE_NONE,
3886 },
3887 },
3888
3889
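	/* AHASH algorithms. */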
3890 {
3891 .type = CRYPTO_ALG_TYPE_AHASH,
3892 .alg.hash = {
3893 .halg.digestsize = MD5_DIGEST_SIZE,
3894 .halg.base = {
3895 .cra_name = "md5",
3896 .cra_driver_name = "md5-iproc",
3897 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3898 .cra_flags = CRYPTO_ALG_ASYNC,
3899 }
3900 },
3901 .cipher_info = {
3902 .alg = CIPHER_ALG_NONE,
3903 .mode = CIPHER_MODE_NONE,
3904 },
3905 .auth_info = {
3906 .alg = HASH_ALG_MD5,
3907 .mode = HASH_MODE_HASH,
3908 },
3909 },
3910 {
3911 .type = CRYPTO_ALG_TYPE_AHASH,
3912 .alg.hash = {
3913 .halg.digestsize = MD5_DIGEST_SIZE,
3914 .halg.base = {
3915 .cra_name = "hmac(md5)",
3916 .cra_driver_name = "hmac-md5-iproc",
3917 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3918 }
3919 },
3920 .cipher_info = {
3921 .alg = CIPHER_ALG_NONE,
3922 .mode = CIPHER_MODE_NONE,
3923 },
3924 .auth_info = {
3925 .alg = HASH_ALG_MD5,
3926 .mode = HASH_MODE_HMAC,
3927 },
3928 },
3929 {.type = CRYPTO_ALG_TYPE_AHASH,
3930 .alg.hash = {
3931 .halg.digestsize = SHA1_DIGEST_SIZE,
3932 .halg.base = {
3933 .cra_name = "sha1",
3934 .cra_driver_name = "sha1-iproc",
3935 .cra_blocksize = SHA1_BLOCK_SIZE,
3936 }
3937 },
3938 .cipher_info = {
3939 .alg = CIPHER_ALG_NONE,
3940 .mode = CIPHER_MODE_NONE,
3941 },
3942 .auth_info = {
3943 .alg = HASH_ALG_SHA1,
3944 .mode = HASH_MODE_HASH,
3945 },
3946 },
3947 {.type = CRYPTO_ALG_TYPE_AHASH,
3948 .alg.hash = {
3949 .halg.digestsize = SHA1_DIGEST_SIZE,
3950 .halg.base = {
3951 .cra_name = "hmac(sha1)",
3952 .cra_driver_name = "hmac-sha1-iproc",
3953 .cra_blocksize = SHA1_BLOCK_SIZE,
3954 }
3955 },
3956 .cipher_info = {
3957 .alg = CIPHER_ALG_NONE,
3958 .mode = CIPHER_MODE_NONE,
3959 },
3960 .auth_info = {
3961 .alg = HASH_ALG_SHA1,
3962 .mode = HASH_MODE_HMAC,
3963 },
3964 },
3965 {.type = CRYPTO_ALG_TYPE_AHASH,
3966 .alg.hash = {
3967 .halg.digestsize = SHA224_DIGEST_SIZE,
3968 .halg.base = {
3969 .cra_name = "sha224",
3970 .cra_driver_name = "sha224-iproc",
3971 .cra_blocksize = SHA224_BLOCK_SIZE,
3972 }
3973 },
3974 .cipher_info = {
3975 .alg = CIPHER_ALG_NONE,
3976 .mode = CIPHER_MODE_NONE,
3977 },
3978 .auth_info = {
3979 .alg = HASH_ALG_SHA224,
3980 .mode = HASH_MODE_HASH,
3981 },
3982 },
3983 {.type = CRYPTO_ALG_TYPE_AHASH,
3984 .alg.hash = {
3985 .halg.digestsize = SHA224_DIGEST_SIZE,
3986 .halg.base = {
3987 .cra_name = "hmac(sha224)",
3988 .cra_driver_name = "hmac-sha224-iproc",
3989 .cra_blocksize = SHA224_BLOCK_SIZE,
3990 }
3991 },
3992 .cipher_info = {
3993 .alg = CIPHER_ALG_NONE,
3994 .mode = CIPHER_MODE_NONE,
3995 },
3996 .auth_info = {
3997 .alg = HASH_ALG_SHA224,
3998 .mode = HASH_MODE_HMAC,
3999 },
4000 },
4001 {.type = CRYPTO_ALG_TYPE_AHASH,
4002 .alg.hash = {
4003 .halg.digestsize = SHA256_DIGEST_SIZE,
4004 .halg.base = {
4005 .cra_name = "sha256",
4006 .cra_driver_name = "sha256-iproc",
4007 .cra_blocksize = SHA256_BLOCK_SIZE,
4008 }
4009 },
4010 .cipher_info = {
4011 .alg = CIPHER_ALG_NONE,
4012 .mode = CIPHER_MODE_NONE,
4013 },
4014 .auth_info = {
4015 .alg = HASH_ALG_SHA256,
4016 .mode = HASH_MODE_HASH,
4017 },
4018 },
4019 {.type = CRYPTO_ALG_TYPE_AHASH,
4020 .alg.hash = {
4021 .halg.digestsize = SHA256_DIGEST_SIZE,
4022 .halg.base = {
4023 .cra_name = "hmac(sha256)",
4024 .cra_driver_name = "hmac-sha256-iproc",
4025 .cra_blocksize = SHA256_BLOCK_SIZE,
4026 }
4027 },
4028 .cipher_info = {
4029 .alg = CIPHER_ALG_NONE,
4030 .mode = CIPHER_MODE_NONE,
4031 },
4032 .auth_info = {
4033 .alg = HASH_ALG_SHA256,
4034 .mode = HASH_MODE_HMAC,
4035 },
4036 },
4037 {
4038 .type = CRYPTO_ALG_TYPE_AHASH,
4039 .alg.hash = {
4040 .halg.digestsize = SHA384_DIGEST_SIZE,
4041 .halg.base = {
4042 .cra_name = "sha384",
4043 .cra_driver_name = "sha384-iproc",
4044 .cra_blocksize = SHA384_BLOCK_SIZE,
4045 }
4046 },
4047 .cipher_info = {
4048 .alg = CIPHER_ALG_NONE,
4049 .mode = CIPHER_MODE_NONE,
4050 },
4051 .auth_info = {
4052 .alg = HASH_ALG_SHA384,
4053 .mode = HASH_MODE_HASH,
4054 },
4055 },
4056 {
4057 .type = CRYPTO_ALG_TYPE_AHASH,
4058 .alg.hash = {
4059 .halg.digestsize = SHA384_DIGEST_SIZE,
4060 .halg.base = {
4061 .cra_name = "hmac(sha384)",
4062 .cra_driver_name = "hmac-sha384-iproc",
4063 .cra_blocksize = SHA384_BLOCK_SIZE,
4064 }
4065 },
4066 .cipher_info = {
4067 .alg = CIPHER_ALG_NONE,
4068 .mode = CIPHER_MODE_NONE,
4069 },
4070 .auth_info = {
4071 .alg = HASH_ALG_SHA384,
4072 .mode = HASH_MODE_HMAC,
4073 },
4074 },
4075 {
4076 .type = CRYPTO_ALG_TYPE_AHASH,
4077 .alg.hash = {
4078 .halg.digestsize = SHA512_DIGEST_SIZE,
4079 .halg.base = {
4080 .cra_name = "sha512",
4081 .cra_driver_name = "sha512-iproc",
4082 .cra_blocksize = SHA512_BLOCK_SIZE,
4083 }
4084 },
4085 .cipher_info = {
4086 .alg = CIPHER_ALG_NONE,
4087 .mode = CIPHER_MODE_NONE,
4088 },
4089 .auth_info = {
4090 .alg = HASH_ALG_SHA512,
4091 .mode = HASH_MODE_HASH,
4092 },
4093 },
4094 {
4095 .type = CRYPTO_ALG_TYPE_AHASH,
4096 .alg.hash = {
4097 .halg.digestsize = SHA512_DIGEST_SIZE,
4098 .halg.base = {
4099 .cra_name = "hmac(sha512)",
4100 .cra_driver_name = "hmac-sha512-iproc",
4101 .cra_blocksize = SHA512_BLOCK_SIZE,
4102 }
4103 },
4104 .cipher_info = {
4105 .alg = CIPHER_ALG_NONE,
4106 .mode = CIPHER_MODE_NONE,
4107 },
4108 .auth_info = {
4109 .alg = HASH_ALG_SHA512,
4110 .mode = HASH_MODE_HMAC,
4111 },
4112 },
4113 {
4114 .type = CRYPTO_ALG_TYPE_AHASH,
4115 .alg.hash = {
4116 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4117 .halg.base = {
4118 .cra_name = "sha3-224",
4119 .cra_driver_name = "sha3-224-iproc",
4120 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4121 }
4122 },
4123 .cipher_info = {
4124 .alg = CIPHER_ALG_NONE,
4125 .mode = CIPHER_MODE_NONE,
4126 },
4127 .auth_info = {
4128 .alg = HASH_ALG_SHA3_224,
4129 .mode = HASH_MODE_HASH,
4130 },
4131 },
4132 {
4133 .type = CRYPTO_ALG_TYPE_AHASH,
4134 .alg.hash = {
4135 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4136 .halg.base = {
4137 .cra_name = "hmac(sha3-224)",
4138 .cra_driver_name = "hmac-sha3-224-iproc",
4139 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4140 }
4141 },
4142 .cipher_info = {
4143 .alg = CIPHER_ALG_NONE,
4144 .mode = CIPHER_MODE_NONE,
4145 },
4146 .auth_info = {
4147 .alg = HASH_ALG_SHA3_224,
4148 .mode = HASH_MODE_HMAC
4149 },
4150 },
4151 {
4152 .type = CRYPTO_ALG_TYPE_AHASH,
4153 .alg.hash = {
4154 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4155 .halg.base = {
4156 .cra_name = "sha3-256",
4157 .cra_driver_name = "sha3-256-iproc",
4158 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4159 }
4160 },
4161 .cipher_info = {
4162 .alg = CIPHER_ALG_NONE,
4163 .mode = CIPHER_MODE_NONE,
4164 },
4165 .auth_info = {
4166 .alg = HASH_ALG_SHA3_256,
4167 .mode = HASH_MODE_HASH,
4168 },
4169 },
4170 {
4171 .type = CRYPTO_ALG_TYPE_AHASH,
4172 .alg.hash = {
4173 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4174 .halg.base = {
4175 .cra_name = "hmac(sha3-256)",
4176 .cra_driver_name = "hmac-sha3-256-iproc",
4177 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4178 }
4179 },
4180 .cipher_info = {
4181 .alg = CIPHER_ALG_NONE,
4182 .mode = CIPHER_MODE_NONE,
4183 },
4184 .auth_info = {
4185 .alg = HASH_ALG_SHA3_256,
4186 .mode = HASH_MODE_HMAC,
4187 },
4188 },
4189 {
4190 .type = CRYPTO_ALG_TYPE_AHASH,
4191 .alg.hash = {
4192 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4193 .halg.base = {
4194 .cra_name = "sha3-384",
4195 .cra_driver_name = "sha3-384-iproc",
				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4197 }
4198 },
4199 .cipher_info = {
4200 .alg = CIPHER_ALG_NONE,
4201 .mode = CIPHER_MODE_NONE,
4202 },
4203 .auth_info = {
4204 .alg = HASH_ALG_SHA3_384,
4205 .mode = HASH_MODE_HASH,
4206 },
4207 },
4208 {
4209 .type = CRYPTO_ALG_TYPE_AHASH,
4210 .alg.hash = {
4211 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4212 .halg.base = {
4213 .cra_name = "hmac(sha3-384)",
4214 .cra_driver_name = "hmac-sha3-384-iproc",
4215 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4216 }
4217 },
4218 .cipher_info = {
4219 .alg = CIPHER_ALG_NONE,
4220 .mode = CIPHER_MODE_NONE,
4221 },
4222 .auth_info = {
4223 .alg = HASH_ALG_SHA3_384,
4224 .mode = HASH_MODE_HMAC,
4225 },
4226 },
4227 {
4228 .type = CRYPTO_ALG_TYPE_AHASH,
4229 .alg.hash = {
4230 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4231 .halg.base = {
4232 .cra_name = "sha3-512",
4233 .cra_driver_name = "sha3-512-iproc",
4234 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4235 }
4236 },
4237 .cipher_info = {
4238 .alg = CIPHER_ALG_NONE,
4239 .mode = CIPHER_MODE_NONE,
4240 },
4241 .auth_info = {
4242 .alg = HASH_ALG_SHA3_512,
4243 .mode = HASH_MODE_HASH,
4244 },
4245 },
4246 {
4247 .type = CRYPTO_ALG_TYPE_AHASH,
4248 .alg.hash = {
4249 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4250 .halg.base = {
4251 .cra_name = "hmac(sha3-512)",
4252 .cra_driver_name = "hmac-sha3-512-iproc",
4253 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4254 }
4255 },
4256 .cipher_info = {
4257 .alg = CIPHER_ALG_NONE,
4258 .mode = CIPHER_MODE_NONE,
4259 },
4260 .auth_info = {
4261 .alg = HASH_ALG_SHA3_512,
4262 .mode = HASH_MODE_HMAC,
4263 },
4264 },
4265 {
4266 .type = CRYPTO_ALG_TYPE_AHASH,
4267 .alg.hash = {
4268 .halg.digestsize = AES_BLOCK_SIZE,
4269 .halg.base = {
4270 .cra_name = "xcbc(aes)",
4271 .cra_driver_name = "xcbc-aes-iproc",
4272 .cra_blocksize = AES_BLOCK_SIZE,
4273 }
4274 },
4275 .cipher_info = {
4276 .alg = CIPHER_ALG_NONE,
4277 .mode = CIPHER_MODE_NONE,
4278 },
4279 .auth_info = {
4280 .alg = HASH_ALG_AES,
4281 .mode = HASH_MODE_XCBC,
4282 },
4283 },
4284 {
4285 .type = CRYPTO_ALG_TYPE_AHASH,
4286 .alg.hash = {
4287 .halg.digestsize = AES_BLOCK_SIZE,
4288 .halg.base = {
4289 .cra_name = "cmac(aes)",
4290 .cra_driver_name = "cmac-aes-iproc",
4291 .cra_blocksize = AES_BLOCK_SIZE,
4292 }
4293 },
4294 .cipher_info = {
4295 .alg = CIPHER_ALG_NONE,
4296 .mode = CIPHER_MODE_NONE,
4297 },
4298 .auth_info = {
4299 .alg = HASH_ALG_AES,
4300 .mode = HASH_MODE_CMAC,
4301 },
4302 },
4303};
4304
4305static int generic_cra_init(struct crypto_tfm *tfm,
4306 struct iproc_alg_s *cipher_alg)
4307{
4308 struct spu_hw *spu = &iproc_priv.spu;
4309 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4310 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4311
4312 flow_log("%s()\n", __func__);
4313
4314 ctx->alg = cipher_alg;
4315 ctx->cipher = cipher_alg->cipher_info;
4316 ctx->auth = cipher_alg->auth_info;
4317 ctx->auth_first = cipher_alg->auth_first;
4318 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4319 ctx->cipher.mode,
4320 blocksize);
4321 ctx->fallback_cipher = NULL;
4322
4323 ctx->enckeylen = 0;
4324 ctx->authkeylen = 0;
4325
4326 atomic_inc(&iproc_priv.stream_count);
4327 atomic_inc(&iproc_priv.session_count);
4328
4329 return 0;
4330}
4331
4332static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4333{
4334 struct crypto_alg *alg = tfm->__crt_alg;
4335 struct iproc_alg_s *cipher_alg;
4336
4337 flow_log("%s()\n", __func__);
4338
4339 tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4340
4341 cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4342 return generic_cra_init(tfm, cipher_alg);
4343}
4344
4345static int ahash_cra_init(struct crypto_tfm *tfm)
4346{
4347 int err;
4348 struct crypto_alg *alg = tfm->__crt_alg;
4349 struct iproc_alg_s *cipher_alg;
4350
4351 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4352 alg.hash);
4353
4354 err = generic_cra_init(tfm, cipher_alg);
4355 flow_log("%s()\n", __func__);
4356
	/*
	 * Tell the ahash API how much per-request context space to allocate
	 * for this driver's request state.
	 */
4361 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4362 sizeof(struct iproc_reqctx_s));
4363
4364 return err;
4365}
4366
4367static int aead_cra_init(struct crypto_aead *aead)
4368{
4369 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4370 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4371 struct crypto_alg *alg = tfm->__crt_alg;
4372 struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4373 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4374 alg.aead);
4375
4376 int err = generic_cra_init(tfm, cipher_alg);
4377
4378 flow_log("%s()\n", __func__);
4379
4380 crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4381 ctx->is_esp = false;
4382 ctx->salt_len = 0;
4383 ctx->salt_offset = 0;
4384
	/* Generate a random first IV */
4386 get_random_bytes(ctx->iv, MAX_IV_SIZE);
4387 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
4388
4389 if (!err) {
4390 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4391 flow_log("%s() creating fallback cipher\n", __func__);
4392
4393 ctx->fallback_cipher =
4394 crypto_alloc_aead(alg->cra_name, 0,
4395 CRYPTO_ALG_ASYNC |
4396 CRYPTO_ALG_NEED_FALLBACK);
4397 if (IS_ERR(ctx->fallback_cipher)) {
4398 pr_err("%s() Error: failed to allocate fallback for %s\n",
4399 __func__, alg->cra_name);
4400 return PTR_ERR(ctx->fallback_cipher);
4401 }
4402 }
4403 }
4404
4405 return err;
4406}
4407
4408static void generic_cra_exit(struct crypto_tfm *tfm)
4409{
4410 atomic_dec(&iproc_priv.session_count);
4411}
4412
4413static void aead_cra_exit(struct crypto_aead *aead)
4414{
4415 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4416 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4417
4418 generic_cra_exit(tfm);
4419
4420 if (ctx->fallback_cipher) {
4421 crypto_free_aead(ctx->fallback_cipher);
4422 ctx->fallback_cipher = NULL;
4423 }
4424}
4425
/**
 * spu_functions_register() - Fill in the hardware-specific SPU function
 * pointers, based on the SPU type and subtype read from device tree.
 * @dev:         Device structure
 * @spu_type:    SPU hardware generation (SPU-M or SPU2)
 * @spu_subtype: SPU hardware version
 */
4433static void spu_functions_register(struct device *dev,
4434 enum spu_spu_type spu_type,
4435 enum spu_spu_subtype spu_subtype)
4436{
4437 struct spu_hw *spu = &iproc_priv.spu;
4438
4439 if (spu_type == SPU_TYPE_SPUM) {
4440 dev_dbg(dev, "Registering SPUM functions");
4441 spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4442 spu->spu_payload_length = spum_payload_length;
4443 spu->spu_response_hdr_len = spum_response_hdr_len;
4444 spu->spu_hash_pad_len = spum_hash_pad_len;
4445 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4446 spu->spu_assoc_resp_len = spum_assoc_resp_len;
4447 spu->spu_aead_ivlen = spum_aead_ivlen;
4448 spu->spu_hash_type = spum_hash_type;
4449 spu->spu_digest_size = spum_digest_size;
4450 spu->spu_create_request = spum_create_request;
4451 spu->spu_cipher_req_init = spum_cipher_req_init;
4452 spu->spu_cipher_req_finish = spum_cipher_req_finish;
4453 spu->spu_request_pad = spum_request_pad;
4454 spu->spu_tx_status_len = spum_tx_status_len;
4455 spu->spu_rx_status_len = spum_rx_status_len;
4456 spu->spu_status_process = spum_status_process;
4457 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4458 spu->spu_ccm_update_iv = spum_ccm_update_iv;
4459 spu->spu_wordalign_padlen = spum_wordalign_padlen;
4460 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4461 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4462 else
4463 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4464 } else {
4465 dev_dbg(dev, "Registering SPU2 functions");
4466 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4467 spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4468 spu->spu_payload_length = spu2_payload_length;
4469 spu->spu_response_hdr_len = spu2_response_hdr_len;
4470 spu->spu_hash_pad_len = spu2_hash_pad_len;
4471 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4472 spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4473 spu->spu_aead_ivlen = spu2_aead_ivlen;
4474 spu->spu_hash_type = spu2_hash_type;
4475 spu->spu_digest_size = spu2_digest_size;
4476 spu->spu_create_request = spu2_create_request;
4477 spu->spu_cipher_req_init = spu2_cipher_req_init;
4478 spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4479 spu->spu_request_pad = spu2_request_pad;
4480 spu->spu_tx_status_len = spu2_tx_status_len;
4481 spu->spu_rx_status_len = spu2_rx_status_len;
4482 spu->spu_status_process = spu2_status_process;
4483 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4484 spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4485 spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4486 }
4487}
4488
/**
 * spu_mb_init() - Initialize the mailbox client and request one mailbox
 * channel per SPU.
 * @dev: SPU driver device structure
 *
 * Return: 0 if successful
 *         < 0 otherwise
 */
4497static int spu_mb_init(struct device *dev)
4498{
4499 struct mbox_client *mcl = &iproc_priv.mcl;
4500 int err, i;
4501
4502 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4503 sizeof(struct mbox_chan *), GFP_KERNEL);
4504 if (!iproc_priv.mbox)
4505 return -ENOMEM;
4506
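	/* Register as a mailbox client; responses arrive via spu_rx_callback() */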
4507 mcl->dev = dev;
4508 mcl->tx_block = false;
4509 mcl->tx_tout = 0;
4510 mcl->knows_txdone = true;
4511 mcl->rx_callback = spu_rx_callback;
4512 mcl->tx_done = NULL;
4513
4514 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4515 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4516 if (IS_ERR(iproc_priv.mbox[i])) {
4517 err = (int)PTR_ERR(iproc_priv.mbox[i]);
4518 dev_err(dev,
4519 "Mbox channel %d request failed with err %d",
4520 i, err);
4521 iproc_priv.mbox[i] = NULL;
4522 goto free_channels;
4523 }
4524 }
4525
4526 return 0;
4527free_channels:
4528 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4529 if (iproc_priv.mbox[i])
4530 mbox_free_channel(iproc_priv.mbox[i]);
4531 }
4532
4533 return err;
4534}
4535
4536static void spu_mb_release(struct platform_device *pdev)
4537{
4538 int i;
4539
4540 for (i = 0; i < iproc_priv.spu.num_chan; i++)
4541 mbox_free_channel(iproc_priv.mbox[i]);
4542}
4543
4544static void spu_counters_init(void)
4545{
4546 int i;
4547 int j;
4548
4549 atomic_set(&iproc_priv.session_count, 0);
4550 atomic_set(&iproc_priv.stream_count, 0);
4551 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4552 atomic64_set(&iproc_priv.bytes_in, 0);
4553 atomic64_set(&iproc_priv.bytes_out, 0);
4554 for (i = 0; i < SPU_OP_NUM; i++) {
4555 atomic_set(&iproc_priv.op_counts[i], 0);
4556 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4557 }
4558 for (i = 0; i < CIPHER_ALG_LAST; i++)
4559 for (j = 0; j < CIPHER_MODE_LAST; j++)
4560 atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4561
4562 for (i = 0; i < HASH_ALG_LAST; i++) {
4563 atomic_set(&iproc_priv.hash_cnt[i], 0);
4564 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4565 }
4566 for (i = 0; i < AEAD_TYPE_LAST; i++)
4567 atomic_set(&iproc_priv.aead_cnt[i], 0);
4568
4569 atomic_set(&iproc_priv.mb_no_spc, 0);
4570 atomic_set(&iproc_priv.mb_send_fail, 0);
4571 atomic_set(&iproc_priv.bad_icv, 0);
4572}
4573
4574static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
4575{
4576 struct spu_hw *spu = &iproc_priv.spu;
4577 struct crypto_alg *crypto = &driver_alg->alg.crypto;
4578 int err;
4579
	/* SPU2 hardware does not support RC4 */
4581 if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4582 (spu->spu_type == SPU_TYPE_SPU2))
4583 return 0;
4584
4585 crypto->cra_module = THIS_MODULE;
4586 crypto->cra_priority = cipher_pri;
4587 crypto->cra_alignmask = 0;
4588 crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4589
4590 crypto->cra_init = ablkcipher_cra_init;
4591 crypto->cra_exit = generic_cra_exit;
4592 crypto->cra_type = &crypto_ablkcipher_type;
4593 crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4594 CRYPTO_ALG_KERN_DRIVER_ONLY;
4595
4596 crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4597 crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4598 crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4599
4600 err = crypto_register_alg(crypto);
4601
4602 if (err == 0)
4603 driver_alg->registered = true;
4604 pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
4605 return err;
4606}
4607
static int spu_register_ahash(struct iproc_alg_s *driver_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct ahash_alg *hash = &driver_alg->alg.hash;
	int err;

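	/* AES-XCBC is the only AES-based hash registered on SPU-M hardware */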
	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
	    (spu->spu_type == SPU_TYPE_SPUM))
		return 0;

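	/* SHA3 algorithms are only registered on SPU2 version 2 hardware */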
	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
		return 0;

	hash->halg.base.cra_module = THIS_MODULE;
	hash->halg.base.cra_priority = hash_pri;
	hash->halg.base.cra_alignmask = 0;
	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	hash->halg.base.cra_init = ahash_cra_init;
	hash->halg.base.cra_exit = generic_cra_exit;
	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
	hash->halg.statesize = sizeof(struct spu_hash_export_s);

	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
		hash->init = ahash_init;
		hash->update = ahash_update;
		hash->final = ahash_final;
		hash->finup = ahash_finup;
		hash->digest = ahash_digest;
		if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
		    ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
		     (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
			hash->setkey = ahash_setkey;
		}
	} else {
		hash->setkey = ahash_hmac_setkey;
		hash->init = ahash_hmac_init;
		hash->update = ahash_hmac_update;
		hash->final = ahash_hmac_final;
		hash->finup = ahash_hmac_finup;
		hash->digest = ahash_hmac_digest;
	}
	hash->export = ahash_export;
	hash->import = ahash_import;

	err = crypto_register_ahash(hash);

	if (err == 0)
		driver_alg->registered = true;
	pr_debug(" registered ahash %s\n",
		 hash->halg.base.cra_driver_name);
	return err;
}

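/**
 * spu_register_aead() - Fill in the common aead_alg fields and register one
 * AEAD algorithm with the crypto API.
 * @driver_alg:  algorithm descriptor from the driver_algs[] table
 *
 * Return: 0 if successful, error code returned by crypto_register_aead()
 *	   otherwise
 */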
static int spu_register_aead(struct iproc_alg_s *driver_alg)
{
	struct aead_alg *aead = &driver_alg->alg.aead;
	int err;

	aead->base.cra_module = THIS_MODULE;
	aead->base.cra_priority = aead_pri;
	aead->base.cra_alignmask = 0;
	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);

	aead->base.cra_flags |= CRYPTO_ALG_ASYNC;

	aead->setauthsize = aead_setauthsize;
	aead->encrypt = aead_encrypt;
	aead->decrypt = aead_decrypt;
	aead->init = aead_cra_init;
	aead->exit = aead_cra_exit;

	err = crypto_register_aead(aead);

	if (err == 0)
		driver_alg->registered = true;
	pr_debug(" registered aead %s\n", aead->base.cra_driver_name);
	return err;
}

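/**
 * spu_algs_register() - Register all algorithms in the driver_algs[] table
 * with the crypto API, unwinding any earlier registrations on failure.
 * @dev:  SPU driver device structure
 *
 * Return: 0 if successful, error code otherwise
 */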
static int spu_algs_register(struct device *dev)
{
	int i, j;
	int err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = spu_register_ablkcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
			break;
		default:
			dev_err(dev,
				"iproc-crypto: unknown alg type: %d",
				driver_algs[i].type);
			err = -EINVAL;
		}

		if (err) {
			dev_err(dev, "alg registration failed with error %d\n",
				err);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++) {
		if (!driver_algs[j].registered)
			continue;
		switch (driver_algs[j].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&driver_algs[j].alg.crypto);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
			driver_algs[j].registered = false;
			break;
		}
	}
	return err;
}

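/* SPU hardware type/subtype pairs used as device tree match data */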
static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};

static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);

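/**
 * spu_dt_read() - Read hardware type/subtype, mailbox channel count, and SPU
 * register ranges from the device tree.
 * @pdev:  platform device for this SPU
 *
 * Return: 0 if successful, error code otherwise
 */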
static int spu_dt_read(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;

	/* Count the mailbox channels described in the device tree */
	err = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
	if (err <= 0) {
		dev_err(dev, "Missing or empty mboxes property\n");
		return err ? err : -ENODEV;
	}
	spu->num_chan = err;

	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(&pdev->dev, "Failed to match device\n");
		return -ENODEV;
	}

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;

	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {

		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(&pdev->dev, "Failed to map registers: %d\n",
				err);
			spu->reg_vbase[i] = NULL;
			return err;
		}
	}
	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);

	return 0;
}

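/**
 * bcm_spu_probe() - Probe routine. Read the device tree, attach to the
 * mailbox channels, register SPU functions and counters, set up debugfs, and
 * register the supported crypto algorithms.
 * @pdev:  platform device for this SPU
 *
 * Return: 0 if successful, error code otherwise
 */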
int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	int err = 0;

	iproc_priv.pdev = pdev;
	platform_set_drvdata(iproc_priv.pdev, &iproc_priv);

	err = spu_dt_read(pdev);
	if (err < 0)
		goto failure;

	err = spu_mb_init(&pdev->dev);
	if (err < 0)
		goto failure;

	if (spu->spu_type == SPU_TYPE_SPUM)
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);

	spu_counters_init();

	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
failure:
	spu_mb_release(pdev);
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
}

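/**
 * bcm_spu_remove() - Remove routine. Unregister all previously registered
 * algorithms, then release debugfs entries and mailbox channels.
 * @pdev:  platform device for this SPU
 *
 * Return: 0 always
 */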
int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
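		/*
		 * Not all algorithms were necessarily registered for this
		 * hardware type/subtype, so only unregister the ones that
		 * were.
		 */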
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&driver_algs[i].alg.crypto);
			dev_dbg(dev, " unregistered cipher %s\n",
				driver_algs[i].alg.crypto.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, " unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, " unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
	return 0;
}

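/* Platform driver glue for the Broadcom SPU crypto hardware */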
static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		.name = "brcm-spu-crypto",
		.of_match_table = of_match_ptr(bcm_spu_dt_ids),
	},
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");