1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/kernel.h>
22#include <linux/interrupt.h>
23#include <linux/platform_device.h>
24#include <linux/scatterlist.h>
25#include <linux/crypto.h>
26#include <linux/kthread.h>
27#include <linux/rtnetlink.h>
28#include <linux/sched.h>
29#include <linux/of_address.h>
30#include <linux/of_device.h>
31#include <linux/io.h>
32#include <linux/bitops.h>
33
34#include <crypto/algapi.h>
35#include <crypto/aead.h>
36#include <crypto/internal/aead.h>
37#include <crypto/aes.h>
38#include <crypto/des.h>
39#include <crypto/hmac.h>
40#include <crypto/sha.h>
41#include <crypto/md5.h>
42#include <crypto/authenc.h>
43#include <crypto/skcipher.h>
44#include <crypto/hash.h>
45#include <crypto/sha3.h>
46
47#include "util.h"
48#include "cipher.h"
49#include "spu.h"
50#include "spum.h"
51#include "spu2.h"
52
53
54
/* ================= device level state ================= */

/* Driver-wide state, shared across all SPU hardware instances */
struct device_private iproc_priv;

/* ==================== Module parameters ===================== */

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * Priorities used when registering algos with the kernel crypto API.
 * A larger number means higher priority, so these values presumably make
 * the hardware implementations win over generic software algos — TODO
 * confirm against the registration code (not visible in this chunk).
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");

/*
 * Template header prepended to SPU request messages (copied to
 * msg_buf.bcm_spu_req_hdr before each request is built). The meaning of the
 * individual bytes is hardware-defined; only the first BCM_HDR_LEN bytes
 * are actually used (see the memcpy call sites below).
 */
char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };

/* Number of BCM header bytes for the probed SPU type (0 is possible) */
#define BCM_HDR_LEN iproc_priv.bcm_hdr_len

/* Min and max time (usec) to sleep before retrying when mbox queue is full */
#define MBOX_SLEEP_MIN 800
#define MBOX_SLEEP_MAX 1000
109
110
111
112
113
114
115
116static u8 select_channel(void)
117{
118 u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
119
120 return chan_idx % iproc_priv.spu.num_chan;
121}
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143static int
144spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
145 struct iproc_reqctx_s *rctx,
146 u8 rx_frag_num,
147 unsigned int chunksize, u32 stat_pad_len)
148{
149 struct spu_hw *spu = &iproc_priv.spu;
150 struct scatterlist *sg;
151 struct iproc_ctx_s *ctx = rctx->ctx;
152 u32 datalen;
153
154 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
155 rctx->gfp);
156 if (!mssg->spu.dst)
157 return -ENOMEM;
158
159 sg = mssg->spu.dst;
160 sg_init_table(sg, rx_frag_num);
161
162 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
163
164
165 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
166 spu->spu_xts_tweak_in_payload())
167 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
168 SPU_XTS_TWEAK_SIZE);
169
170
171 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
172 rctx->dst_nents, chunksize);
173 if (datalen < chunksize) {
174 pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
175 __func__, chunksize, datalen);
176 return -EFAULT;
177 }
178
179 if (ctx->cipher.alg == CIPHER_ALG_RC4)
180
181 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
182
183 if (stat_pad_len)
184 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
185
186 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
187 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
188
189 return 0;
190}
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211static int
212spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
213 struct iproc_reqctx_s *rctx,
214 u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
215{
216 struct spu_hw *spu = &iproc_priv.spu;
217 struct scatterlist *sg;
218 struct iproc_ctx_s *ctx = rctx->ctx;
219 u32 datalen;
220 u32 stat_len;
221
222 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
223 rctx->gfp);
224 if (unlikely(!mssg->spu.src))
225 return -ENOMEM;
226
227 sg = mssg->spu.src;
228 sg_init_table(sg, tx_frag_num);
229
230 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
231 BCM_HDR_LEN + ctx->spu_req_hdr_len);
232
233
234 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
235 spu->spu_xts_tweak_in_payload())
236 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
237
238
239 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
240 rctx->src_nents, chunksize);
241 if (unlikely(datalen < chunksize)) {
242 pr_err("%s(): failed to copy src sg to mbox msg",
243 __func__);
244 return -EFAULT;
245 }
246
247 if (pad_len)
248 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
249
250 stat_len = spu->spu_tx_status_len();
251 if (stat_len) {
252 memset(rctx->msg_buf.tx_stat, 0, stat_len);
253 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
254 }
255 return 0;
256}
257
/*
 * mailbox_send_message() - Send a SPU request message on the given mailbox
 * channel, retrying while the channel queue is full if the crypto request
 * allows sleeping.
 * @mssg:	brcm_message to send
 * @flags:	crypto request flags; CRYPTO_TFM_REQ_MAY_SLEEP enables retries
 * @chan_idx:	mailbox channel index to send on
 *
 * Returns: 0 on success, or a negative error code from the mailbox
 * framework or from the message itself.
 */
static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set,
			 * sleep briefly before retrying, and count each
			 * no-space occurrence for the stats.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	/* Check error returned by mailbox controller */
	err = mssg->error;
	if (unlikely(err < 0)) {
		dev_err(dev, "message error %d", err);
		/* Signal txdone for mailbox channel */
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
315{
316 struct spu_hw *spu = &iproc_priv.spu;
317 struct crypto_async_request *areq = rctx->parent;
318 struct ablkcipher_request *req =
319 container_of(areq, struct ablkcipher_request, base);
320 struct iproc_ctx_s *ctx = rctx->ctx;
321 struct spu_cipher_parms cipher_parms;
322 int err = 0;
323 unsigned int chunksize = 0;
324 int remaining = 0;
325 int chunk_start;
326
327
328 u8 local_iv_ctr[MAX_IV_SIZE];
329 u32 stat_pad_len;
330 u32 pad_len;
331 bool update_key = false;
332 struct brcm_message *mssg;
333
334
335 u8 rx_frag_num = 2;
336 u8 tx_frag_num = 1;
337
338 flow_log("%s\n", __func__);
339
340 cipher_parms.alg = ctx->cipher.alg;
341 cipher_parms.mode = ctx->cipher.mode;
342 cipher_parms.type = ctx->cipher_type;
343 cipher_parms.key_len = ctx->enckeylen;
344 cipher_parms.key_buf = ctx->enckey;
345 cipher_parms.iv_buf = local_iv_ctr;
346 cipher_parms.iv_len = rctx->iv_ctr_len;
347
348 mssg = &rctx->mb_mssg;
349 chunk_start = rctx->src_sent;
350 remaining = rctx->total_todo - chunk_start;
351
352
353 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
354 (remaining > ctx->max_payload))
355 chunksize = ctx->max_payload;
356 else
357 chunksize = remaining;
358
359 rctx->src_sent += chunksize;
360 rctx->total_sent = rctx->src_sent;
361
362
363 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
364 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
365
366 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
367 rctx->is_encrypt && chunk_start)
368
369
370
371
372 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
373 rctx->iv_ctr_len,
374 chunk_start - rctx->iv_ctr_len);
375
376 if (rctx->iv_ctr_len) {
377
378 __builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
379 rctx->iv_ctr_len);
380
381
382 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
383 !rctx->is_encrypt) {
384
385
386
387
388 sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
389 rctx->iv_ctr_len,
390 rctx->src_sent - rctx->iv_ctr_len);
391 } else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
392
393
394
395
396
397
398
399
400
401 add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
402 }
403 }
404
405 if (ctx->cipher.alg == CIPHER_ALG_RC4) {
406 rx_frag_num++;
407 if (chunk_start) {
408
409
410
411
412 cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
413 update_key = true;
414 cipher_parms.type = CIPHER_TYPE_UPDT;
415 } else if (!rctx->is_encrypt) {
416
417
418
419
420
421
422 update_key = true;
423 cipher_parms.type = CIPHER_TYPE_INIT;
424 }
425 }
426
427 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
428 flow_log("max_payload infinite\n");
429 else
430 flow_log("max_payload %u\n", ctx->max_payload);
431
432 flow_log("sent:%u start:%u remains:%u size:%u\n",
433 rctx->src_sent, chunk_start, remaining, chunksize);
434
435
436 memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
437 sizeof(rctx->msg_buf.bcm_spu_req_hdr));
438
439
440
441
442
443
444 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
445 ctx->spu_req_hdr_len, !(rctx->is_encrypt),
446 &cipher_parms, update_key, chunksize);
447
448 atomic64_add(chunksize, &iproc_priv.bytes_out);
449
450 stat_pad_len = spu->spu_wordalign_padlen(chunksize);
451 if (stat_pad_len)
452 rx_frag_num++;
453 pad_len = stat_pad_len;
454 if (pad_len) {
455 tx_frag_num++;
456 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
457 0, ctx->auth.alg, ctx->auth.mode,
458 rctx->total_sent, stat_pad_len);
459 }
460
461 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
462 ctx->spu_req_hdr_len);
463 packet_log("payload:\n");
464 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
465 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
466
467
468
469
470
471 memset(mssg, 0, sizeof(*mssg));
472 mssg->type = BRCM_MESSAGE_SPU;
473 mssg->ctx = rctx;
474
475
476 rx_frag_num += rctx->dst_nents;
477
478 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
479 spu->spu_xts_tweak_in_payload())
480 rx_frag_num++;
481
482 err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
483 stat_pad_len);
484 if (err)
485 return err;
486
487
488 tx_frag_num += rctx->src_nents;
489 if (spu->spu_tx_status_len())
490 tx_frag_num++;
491
492 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
493 spu->spu_xts_tweak_in_payload())
494 tx_frag_num++;
495
496 err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
497 pad_len);
498 if (err)
499 return err;
500
501 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
502 if (unlikely(err < 0))
503 return err;
504
505 return -EINPROGRESS;
506}
507
508
509
510
511
512
/*
 * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
 * total received count for the request and updates global stats.
 * @rctx:	Crypto request context
 */
static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
	/* req is only referenced from logging macros that compile out */
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
#endif
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);

	/*
	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
	 * encrypted tweak ("i") value; we don't count those.
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload() &&
	    (payload_len >= SPU_XTS_TWEAK_SIZE))
		payload_len -= SPU_XTS_TWEAK_SIZE;

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	flow_log("%s() offset: %u, bd_len: %u BD:\n",
		 __func__, rctx->total_received, payload_len);

	dump_sg(req->dst, rctx->total_received, payload_len);
	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
			    SPU_SUPDT_LEN);

	rctx->total_received += payload_len;
	if (rctx->total_received == rctx->total_todo) {
		/* Entire request complete; bump the op counters */
		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
		atomic_inc(
		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
	}
}
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
/*
 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ahash request.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to receive the
 *		response message
 * @digestsize:	length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ahash_rx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 rx_frag_num, unsigned int digestsize,
		       u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* Space for digest */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
	return 0;
}
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624static int
625spu_ahash_tx_sg_create(struct brcm_message *mssg,
626 struct iproc_reqctx_s *rctx,
627 u8 tx_frag_num,
628 u32 spu_hdr_len,
629 unsigned int hash_carry_len,
630 unsigned int new_data_len, u32 pad_len)
631{
632 struct spu_hw *spu = &iproc_priv.spu;
633 struct scatterlist *sg;
634 u32 datalen;
635 u32 stat_len;
636
637 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
638 rctx->gfp);
639 if (!mssg->spu.src)
640 return -ENOMEM;
641
642 sg = mssg->spu.src;
643 sg_init_table(sg, tx_frag_num);
644
645 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
646 BCM_HDR_LEN + spu_hdr_len);
647
648 if (hash_carry_len)
649 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
650
651 if (new_data_len) {
652
653 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
654 rctx->src_nents, new_data_len);
655 if (datalen < new_data_len) {
656 pr_err("%s(): failed to copy src sg to mbox msg",
657 __func__);
658 return -EFAULT;
659 }
660 }
661
662 if (pad_len)
663 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
664
665 stat_len = spu->spu_tx_status_len();
666 if (stat_len) {
667 memset(rctx->msg_buf.tx_stat, 0, stat_len);
668 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
669 }
670
671 return 0;
672}
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
/*
 * handle_ahash_req() - Process an asynchronous hash request from the crypto
 * API. Builds a SPU request message for the next chunk of data embedded in
 * the request, then submits it on the selected mailbox channel. Incremental
 * data (i.e., not the first or last chunk) is submitted in multiples of the
 * hash block size, with any remainder carried over in rctx->hash_carry.
 * @rctx:	Crypto request context
 *
 * Return:
 *   -EINPROGRESS: request was submitted to SPU and response will be returned
 *		   asynchronously
 *   -EAGAIN:      data was buffered in the hash carry; caller should treat
 *		   the update as complete without submitting to hardware
 *   Any other value indicates an error
 */
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	/* number of bytes still to be hashed in this req */
	unsigned int nbytes_to_hash = 0;
	int err = 0;
	unsigned int chunksize = 0;	/* length of hash carry + new data */
	/*
	 * length of new data, not from hash carry, to be submitted in
	 * this hw request
	 */
	unsigned int new_data_len;

	unsigned int chunk_start = 0;
	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
	 * rx always includes a buffer to catch digest and STATUS.
	 */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;

	/*
	 * For hash algorithms below assignment looks bit odd but
	 * it's needed for AES-XCBC and AES-CMAC hash algorithms
	 * to differentiate between 128, 192, 256 bit key values.
	 * Based on the key values, hash algorithm is selected.
	 * For example for 128 bit key, hash algorithm is AES-128.
	 */
	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;

	/*
	 * Compute the amount remaining to hash. This may include data
	 * carried over from previous requests.
	 */
	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * If this is not a final request and the request data is not a
	 * multiple of a full block, then simply park the extra data and
	 * prefix it to the data for the next request.
	 */
	if (!rctx->is_final) {
		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
		u16 new_len;  /* len of data to add to hash carry */

		rem = chunksize % blocksize;   /* remainder */
		if (rem) {
			/* chunksize not a multiple of blocksize */
			chunksize -= rem;
			if (chunksize == 0) {
				/* Don't have a full block to submit to hw */
				new_len = rem - rctx->hash_carry_len;
				sg_copy_part_to_buf(req->src, dest, new_len,
						    rctx->src_sent);
				rctx->hash_carry_len = rem;
				flow_log("Exiting with hash carry len: %u\n",
					 rctx->hash_carry_len);
				packet_dump("  buf: ",
					    rctx->hash_carry,
					    rctx->hash_carry_len);
				return -EAGAIN;
			}
		}
	}

	/* if we have hash carry, then prefix it to the data in this request */
	local_nbuf = rctx->hash_carry_len;
	rctx->hash_carry_len = 0;
	if (local_nbuf)
		tx_frag_num++;
	new_data_len = chunksize - local_nbuf;

	/* Count number of sg entries to be used in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
				       new_data_len);

	/* AES hashing keeps key size in type field, so need to copy it here */
	if (hash_parms.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)cipher_parms.type;
	else
		hash_parms.type = spu->spu_hash_type(rctx->total_sent);

	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
					  hash_parms.type);
	hash_parms.digestsize =	digestsize;

	/* update the indexes */
	rctx->total_sent += chunksize;
	/* if you sent a prebuf then that wasn't from this req->src */
	rctx->src_sent += new_data_len;

	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
							   hash_parms.mode,
							   chunksize,
							   blocksize);

	/*
	 * If a non-first chunk, then include the digest returned from the
	 * previous chunk so that hw can add to it (except for AES types).
	 */
	if ((hash_parms.type == HASH_TYPE_UPDT) &&
	    (hash_parms.alg != HASH_ALG_AES)) {
		hash_parms.key_buf = rctx->incr_hash;
		hash_parms.key_len = digestsize;
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s() final: %u nbuf: %u ",
		 __func__, rctx->is_final, local_nbuf);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	hash_parms.prebuf_len = local_nbuf;
	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN,
					      &req_opts, &cipher_parms,
					      &hash_parms, &aead_parms,
					      new_data_len);

	if (spu_hdr_len == 0) {
		pr_err("Failed to create SPU request header\n");
		return -EFAULT;
	}

	/*
	 * Determine total length of padding required. Put all padding in one
	 * buffer.
	 */
	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
				   0, 0, hash_parms.pad_len);
	if (spu->spu_tx_status_len())
		stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
				     hash_parms.pad_len, ctx->auth.alg,
				     ctx->auth.mode, rctx->total_sent,
				     stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
	flow_log("Data:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}
933
934
935
936
937
938
939
940
941
942
943
944static int spu_hmac_outer_hash(struct ahash_request *req,
945 struct iproc_ctx_s *ctx)
946{
947 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
948 unsigned int blocksize =
949 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
950 int rc;
951
952 switch (ctx->auth.alg) {
953 case HASH_ALG_MD5:
954 rc = do_shash("md5", req->result, ctx->opad, blocksize,
955 req->result, ctx->digestsize, NULL, 0);
956 break;
957 case HASH_ALG_SHA1:
958 rc = do_shash("sha1", req->result, ctx->opad, blocksize,
959 req->result, ctx->digestsize, NULL, 0);
960 break;
961 case HASH_ALG_SHA224:
962 rc = do_shash("sha224", req->result, ctx->opad, blocksize,
963 req->result, ctx->digestsize, NULL, 0);
964 break;
965 case HASH_ALG_SHA256:
966 rc = do_shash("sha256", req->result, ctx->opad, blocksize,
967 req->result, ctx->digestsize, NULL, 0);
968 break;
969 case HASH_ALG_SHA384:
970 rc = do_shash("sha384", req->result, ctx->opad, blocksize,
971 req->result, ctx->digestsize, NULL, 0);
972 break;
973 case HASH_ALG_SHA512:
974 rc = do_shash("sha512", req->result, ctx->opad, blocksize,
975 req->result, ctx->digestsize, NULL, 0);
976 break;
977 default:
978 pr_err("%s() Error : unknown hmac type\n", __func__);
979 rc = -EINVAL;
980 }
981 return rc;
982}
983
984
985
986
987
988
989
990
/*
 * ahash_req_done() - Process a hash result from the SPU hardware after the
 * entire request has completed. Copies the digest to the request result
 * buffer, applies any required fixups, and updates stats.
 * @rctx: Crypto request context
 *
 * Return: 0 if successful, < 0 on error (from the software outer HMAC)
 */
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		/*
		 * byte swap the output from the UPDT function to network
		 * byte order (MD5 digest is 5 32-bit words on SPU-M)
		 */
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump("  digest ", req->result, ctx->digestsize);

	/* if this an HMAC then do the outer hash */
	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump("  hmac: ", req->result, ctx->digestsize);
	}

	/* Update HMAC vs plain-hash stats depending on how HMAC was done */
	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}
1034
1035
1036
1037
1038
1039
1040
/*
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Checks if the entire crypto API request has been processed, and if so,
 * finishes the request.
 * @rctx: Crypto request context
 */
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
#ifdef DEBUG
	/* these locals are only referenced from logging macros */
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
#endif
	/*
	 * Save hash to use as input to next op if incremental. Might be copying
	 * too much, but that's easier than figuring out actual digest size here
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1091 struct aead_request *req,
1092 struct iproc_reqctx_s *rctx,
1093 u8 rx_frag_num,
1094 unsigned int assoc_len,
1095 u32 ret_iv_len, unsigned int resp_len,
1096 unsigned int digestsize, u32 stat_pad_len)
1097{
1098 struct spu_hw *spu = &iproc_priv.spu;
1099 struct scatterlist *sg;
1100 struct iproc_ctx_s *ctx = rctx->ctx;
1101 u32 datalen;
1102 u32 assoc_buf_len;
1103 u8 data_padlen = 0;
1104
1105 if (ctx->is_rfc4543) {
1106
1107 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1108 assoc_len + resp_len);
1109 assoc_buf_len = assoc_len;
1110 } else {
1111 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1112 resp_len);
1113 assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1114 assoc_len, ret_iv_len,
1115 rctx->is_encrypt);
1116 }
1117
1118 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1119
1120 data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1121 resp_len +
1122 data_padlen);
1123
1124 if (data_padlen)
1125
1126 rx_frag_num++;
1127
1128 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1129 rctx->gfp);
1130 if (!mssg->spu.dst)
1131 return -ENOMEM;
1132
1133 sg = mssg->spu.dst;
1134 sg_init_table(sg, rx_frag_num);
1135
1136
1137 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1138
1139 if (assoc_buf_len) {
1140
1141
1142
1143
1144 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1145 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1146 }
1147
1148 if (resp_len) {
1149
1150
1151
1152
1153 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1154 rctx->dst_nents, resp_len);
1155 if (datalen < (resp_len)) {
1156 pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1157 __func__, resp_len, datalen);
1158 return -EFAULT;
1159 }
1160 }
1161
1162
1163 if (data_padlen) {
1164 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1165 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1166 }
1167
1168
1169 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1170
1171 flow_log("stat_pad_len %u\n", stat_pad_len);
1172 if (stat_pad_len) {
1173 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1174 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1175 }
1176
1177 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1178 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1179
1180 return 0;
1181}
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1211 struct iproc_reqctx_s *rctx,
1212 u8 tx_frag_num,
1213 u32 spu_hdr_len,
1214 struct scatterlist *assoc,
1215 unsigned int assoc_len,
1216 int assoc_nents,
1217 unsigned int aead_iv_len,
1218 unsigned int chunksize,
1219 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1220{
1221 struct spu_hw *spu = &iproc_priv.spu;
1222 struct scatterlist *sg;
1223 struct scatterlist *assoc_sg = assoc;
1224 struct iproc_ctx_s *ctx = rctx->ctx;
1225 u32 datalen;
1226 u32 written;
1227 u32 assoc_offset = 0;
1228 u32 stat_len;
1229
1230 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1231 rctx->gfp);
1232 if (!mssg->spu.src)
1233 return -ENOMEM;
1234
1235 sg = mssg->spu.src;
1236 sg_init_table(sg, tx_frag_num);
1237
1238 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1239 BCM_HDR_LEN + spu_hdr_len);
1240
1241 if (assoc_len) {
1242
1243 written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1244 assoc_nents, assoc_len);
1245 if (written < assoc_len) {
1246 pr_err("%s(): failed to copy assoc sg to mbox msg",
1247 __func__);
1248 return -EFAULT;
1249 }
1250 }
1251
1252 if (aead_iv_len)
1253 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1254
1255 if (aad_pad_len) {
1256 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1257 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1258 }
1259
1260 datalen = chunksize;
1261 if ((chunksize > ctx->digestsize) && incl_icv)
1262 datalen -= ctx->digestsize;
1263 if (datalen) {
1264
1265 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1266 rctx->src_nents, datalen);
1267 if (written < datalen) {
1268 pr_err("%s(): failed to copy src sg to mbox msg",
1269 __func__);
1270 return -EFAULT;
1271 }
1272 }
1273
1274 if (pad_len) {
1275 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1276 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1277 }
1278
1279 if (incl_icv)
1280 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1281
1282 stat_len = spu->spu_tx_status_len();
1283 if (stat_len) {
1284 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1285 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1286 }
1287 return 0;
1288}
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307static int handle_aead_req(struct iproc_reqctx_s *rctx)
1308{
1309 struct spu_hw *spu = &iproc_priv.spu;
1310 struct crypto_async_request *areq = rctx->parent;
1311 struct aead_request *req = container_of(areq,
1312 struct aead_request, base);
1313 struct iproc_ctx_s *ctx = rctx->ctx;
1314 int err;
1315 unsigned int chunksize;
1316 unsigned int resp_len;
1317 u32 spu_hdr_len;
1318 u32 db_size;
1319 u32 stat_pad_len;
1320 u32 pad_len;
1321 struct brcm_message *mssg;
1322 struct spu_request_opts req_opts;
1323 struct spu_cipher_parms cipher_parms;
1324 struct spu_hash_parms hash_parms;
1325 struct spu_aead_parms aead_parms;
1326 int assoc_nents = 0;
1327 bool incl_icv = false;
1328 unsigned int digestsize = ctx->digestsize;
1329
1330
1331
1332 u8 rx_frag_num = 2;
1333 u8 tx_frag_num = 1;
1334
1335
1336 chunksize = rctx->total_todo;
1337
1338 flow_log("%s: chunksize %u\n", __func__, chunksize);
1339
1340 memset(&req_opts, 0, sizeof(req_opts));
1341 memset(&hash_parms, 0, sizeof(hash_parms));
1342 memset(&aead_parms, 0, sizeof(aead_parms));
1343
1344 req_opts.is_inbound = !(rctx->is_encrypt);
1345 req_opts.auth_first = ctx->auth_first;
1346 req_opts.is_aead = true;
1347 req_opts.is_esp = ctx->is_esp;
1348
1349 cipher_parms.alg = ctx->cipher.alg;
1350 cipher_parms.mode = ctx->cipher.mode;
1351 cipher_parms.type = ctx->cipher_type;
1352 cipher_parms.key_buf = ctx->enckey;
1353 cipher_parms.key_len = ctx->enckeylen;
1354 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1355 cipher_parms.iv_len = rctx->iv_ctr_len;
1356
1357 hash_parms.alg = ctx->auth.alg;
1358 hash_parms.mode = ctx->auth.mode;
1359 hash_parms.type = HASH_TYPE_NONE;
1360 hash_parms.key_buf = (u8 *)ctx->authkey;
1361 hash_parms.key_len = ctx->authkeylen;
1362 hash_parms.digestsize = digestsize;
1363
1364 if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1365 (ctx->authkeylen < SHA224_DIGEST_SIZE))
1366 hash_parms.key_len = SHA224_DIGEST_SIZE;
1367
1368 aead_parms.assoc_size = req->assoclen;
1369 if (ctx->is_esp && !ctx->is_rfc4543) {
1370
1371
1372
1373
1374
1375 aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1376
1377 if (rctx->is_encrypt) {
1378 aead_parms.return_iv = true;
1379 aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1380 aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1381 }
1382 } else {
1383 aead_parms.ret_iv_len = 0;
1384 }
1385
1386
1387
1388
1389
1390
1391
1392 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1393 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1394 if (aead_parms.assoc_size)
1395 assoc_nents = spu_sg_count(rctx->assoc, 0,
1396 aead_parms.assoc_size);
1397
1398 mssg = &rctx->mb_mssg;
1399
1400 rctx->total_sent = chunksize;
1401 rctx->src_sent = chunksize;
1402 if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1403 aead_parms.assoc_size,
1404 aead_parms.ret_iv_len,
1405 rctx->is_encrypt))
1406 rx_frag_num++;
1407
1408 aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1409 rctx->iv_ctr_len);
1410
1411 if (ctx->auth.alg == HASH_ALG_AES)
1412 hash_parms.type = (enum hash_type)ctx->cipher_type;
1413
1414
1415 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1416 aead_parms.assoc_size);
1417
1418
1419 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1420 chunksize);
1421
1422 if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1423
1424
1425
1426
1427 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1428 ctx->cipher.mode,
1429 aead_parms.assoc_size + 2);
1430
1431
1432
1433
1434
1435 if (!rctx->is_encrypt)
1436 aead_parms.data_pad_len =
1437 spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1438 chunksize - digestsize);
1439
1440
1441 spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1442 chunksize, rctx->is_encrypt,
1443 ctx->is_esp);
1444 }
1445
1446 if (ctx->is_rfc4543) {
1447
1448
1449
1450
1451 aead_parms.aad_pad_len = 0;
1452 if (!rctx->is_encrypt)
1453 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1454 ctx->cipher.mode,
1455 aead_parms.assoc_size + chunksize -
1456 digestsize);
1457 else
1458 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1459 ctx->cipher.mode,
1460 aead_parms.assoc_size + chunksize);
1461
1462 req_opts.is_rfc4543 = true;
1463 }
1464
1465 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1466 incl_icv = true;
1467 tx_frag_num++;
1468
1469 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1470 req->assoclen + rctx->total_sent -
1471 digestsize);
1472 }
1473
1474 atomic64_add(chunksize, &iproc_priv.bytes_out);
1475
1476 flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1477
1478
1479 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1480
1481 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1482 BCM_HDR_LEN, &req_opts,
1483 &cipher_parms, &hash_parms,
1484 &aead_parms, chunksize);
1485
1486
1487 db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1488 chunksize, aead_parms.aad_pad_len,
1489 aead_parms.data_pad_len, 0);
1490
1491 stat_pad_len = spu->spu_wordalign_padlen(db_size);
1492
1493 if (stat_pad_len)
1494 rx_frag_num++;
1495 pad_len = aead_parms.data_pad_len + stat_pad_len;
1496 if (pad_len) {
1497 tx_frag_num++;
1498 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1499 aead_parms.data_pad_len, 0,
1500 ctx->auth.alg, ctx->auth.mode,
1501 rctx->total_sent, stat_pad_len);
1502 }
1503
1504 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1505 spu_hdr_len);
1506 dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1507 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1508 packet_log("BD:\n");
1509 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1510 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1511
1512
1513
1514
1515
1516 memset(mssg, 0, sizeof(*mssg));
1517 mssg->type = BRCM_MESSAGE_SPU;
1518 mssg->ctx = rctx;
1519
1520
1521 rx_frag_num += rctx->dst_nents;
1522 resp_len = chunksize;
1523
1524
1525
1526
1527
1528
1529 rx_frag_num++;
1530
1531 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1532 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1533
1534
1535
1536
1537 resp_len -= ctx->digestsize;
1538 if (resp_len == 0)
1539
1540 rx_frag_num -= rctx->dst_nents;
1541 }
1542
1543 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1544 aead_parms.assoc_size,
1545 aead_parms.ret_iv_len, resp_len, digestsize,
1546 stat_pad_len);
1547 if (err)
1548 return err;
1549
1550
1551 tx_frag_num += rctx->src_nents;
1552 tx_frag_num += assoc_nents;
1553 if (aead_parms.aad_pad_len)
1554 tx_frag_num++;
1555 if (aead_parms.iv_len)
1556 tx_frag_num++;
1557 if (spu->spu_tx_status_len())
1558 tx_frag_num++;
1559 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1560 rctx->assoc, aead_parms.assoc_size,
1561 assoc_nents, aead_parms.iv_len, chunksize,
1562 aead_parms.aad_pad_len, pad_len, incl_icv);
1563 if (err)
1564 return err;
1565
1566 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1567 if (unlikely(err < 0))
1568 return err;
1569
1570 return -EINPROGRESS;
1571}
1572
1573
1574
1575
1576
/*
 * handle_aead_resp() - Process a SPU response message for an AEAD request.
 * @rctx: Crypto request context
 *
 * On encrypt, copies the returned ICV (digest) into the destination
 * scatterlist after the ciphertext. Updates byte and operation statistics.
 */
static void handle_aead_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;
	unsigned int icv_offset;
	u32 result_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
	flow_log("payload_len %u\n", payload_len);

	/* only count payload, not the response header */
	atomic64_add(payload_len, &iproc_priv.bytes_in);

	if (req->assoclen)
		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
			    req->assoclen);

	/*
	 * On encrypt, append the ICV returned by the SPU to the end of the
	 * data written back to the destination scatterlist. icv_offset is
	 * past the associated data and all ciphertext sent so far.
	 */
	result_len = req->cryptlen;
	if (rctx->is_encrypt) {
		icv_offset = req->assoclen + rctx->total_sent;
		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
				      ctx->digestsize, icv_offset);
		result_len += ctx->digestsize;
	}

	packet_log("response data:  ");
	dump_sg(req->dst, req->assoclen, result_len);

	/* Per-operation and per-AEAD-variant statistics */
	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
	if (ctx->cipher.alg == CIPHER_ALG_AES) {
		if (ctx->cipher.mode == CIPHER_MODE_CCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
		else
			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	} else {
		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	}
}
1629
1630
1631
1632
1633
1634
1635
1636
1637static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1638{
1639
1640 struct brcm_message *mssg = &rctx->mb_mssg;
1641
1642 kfree(mssg->spu.src);
1643 kfree(mssg->spu.dst);
1644 memset(mssg, 0, sizeof(struct brcm_message));
1645}
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655static void finish_req(struct iproc_reqctx_s *rctx, int err)
1656{
1657 struct crypto_async_request *areq = rctx->parent;
1658
1659 flow_log("%s() err:%d\n\n", __func__, err);
1660
1661
1662 spu_chunk_cleanup(rctx);
1663
1664 if (areq)
1665 areq->complete(areq, err);
1666}
1667
1668
1669
1670
1671
1672
/*
 * spu_rx_callback() - Callback from the mailbox framework when a SPU
 * response message is received.
 * @cl:  mailbox client context
 * @msg: mailbox message containing the SPU response
 *
 * Checks the SPU status, dispatches response handling by algorithm type,
 * then either submits the next chunk of a multi-chunk request or finishes
 * the request (also finishing on any error).
 */
static void spu_rx_callback(struct mbox_client *cl, void *msg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct brcm_message *mssg = msg;
	struct iproc_reqctx_s *rctx;
	struct iproc_ctx_s *ctx;
	struct crypto_async_request *areq;
	int err = 0;

	rctx = mssg->ctx;
	if (unlikely(!rctx)) {
		/* This is fatal; we have no request context to complete */
		pr_err("%s(): no request context", __func__);
		err = -EFAULT;
		goto cb_finish;
	}
	areq = rctx->parent;
	ctx = rctx->ctx;

	/* process the SPU status word; count bad ICVs separately */
	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
	if (err != 0) {
		if (err == SPU_INVALID_ICV)
			atomic_inc(&iproc_priv.bad_icv);
		err = -EBADMSG;
		goto cb_finish;
	}

	/* Process the SPU response for the chunk just completed */
	switch (rctx->ctx->alg->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		handle_ablkcipher_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		handle_ahash_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		handle_aead_resp(rctx);
		break;
	default:
		err = -EINVAL;
		goto cb_finish;
	}

	/*
	 * If this response does not complete the request, submit the next
	 * chunk to the SPU.
	 */
	if (rctx->total_sent < rctx->total_todo) {
		/* Deallocate anything specific to the previous chunk */
		spu_chunk_cleanup(rctx);

		switch (rctx->ctx->alg->type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = handle_ablkcipher_req(rctx);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = handle_ahash_req(rctx);
			if (err == -EAGAIN)
				/*
				 * data was saved in the hash carry buffer;
				 * report success to the crypto API
				 */
				err = 0;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = handle_aead_req(rctx);
			break;
		default:
			err = -EINVAL;
		}

		if (err == -EINPROGRESS)
			/* Successfully submitted request for next chunk */
			return;
	}

cb_finish:
	finish_req(rctx, err);
}
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
/*
 * ablkcipher_enqueue() - Set up the request context for a symmetric
 * cipher operation and submit the first chunk to a SPU channel.
 * @req:     Crypto API ablkcipher request
 * @encrypt: true to encrypt, false to decrypt
 *
 * Return: -EINPROGRESS if the request was submitted and will complete
 *	   asynchronously; < 0 on error
 */
static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
{
	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
	struct iproc_ctx_s *ctx =
	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	int err;

	flow_log("%s() enc:%u\n", __func__, encrypt);

	/* Choose allocation flags based on whether we may sleep */
	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->is_encrypt = encrypt;
	rctx->bd_suppress = false;
	rctx->total_todo = req->nbytes;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;
	rctx->ctx = ctx;

	/* Initialize current position in src and dst scatterlists */
	rctx->src_sg = req->src;
	rctx->src_nents = 0;
	rctx->src_skip = 0;
	rctx->dst_sg = req->dst;
	rctx->dst_nents = 0;
	rctx->dst_skip = 0;

	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
	    ctx->cipher.mode == CIPHER_MODE_CTR ||
	    ctx->cipher.mode == CIPHER_MODE_OFB ||
	    ctx->cipher.mode == CIPHER_MODE_XTS ||
	    ctx->cipher.mode == CIPHER_MODE_GCM ||
	    ctx->cipher.mode == CIPHER_MODE_CCM) {
		/* Mode uses an IV/counter; copy it from the request */
		rctx->iv_ctr_len =
		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
	} else {
		rctx->iv_ctr_len = 0;
	}

	/* Choose a SPU channel to process this request */
	rctx->chan_idx = select_channel();
	err = handle_ablkcipher_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result (e.g. error); free chunk resources */
		spu_chunk_cleanup(rctx);

	return err;
}
1815
1816static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1817 unsigned int keylen)
1818{
1819 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1820 u32 tmp[DES_EXPKEY_WORDS];
1821
1822 if (keylen == DES_KEY_SIZE) {
1823 if (des_ekey(tmp, key) == 0) {
1824 if (crypto_ablkcipher_get_flags(cipher) &
1825 CRYPTO_TFM_REQ_WEAK_KEY) {
1826 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1827
1828 crypto_ablkcipher_set_flags(cipher, flags);
1829 return -EINVAL;
1830 }
1831 }
1832
1833 ctx->cipher_type = CIPHER_TYPE_DES;
1834 } else {
1835 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1836 return -EINVAL;
1837 }
1838 return 0;
1839}
1840
1841static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1842 unsigned int keylen)
1843{
1844 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1845
1846 if (keylen == (DES_KEY_SIZE * 3)) {
1847 u32 flags;
1848 int ret;
1849
1850 flags = crypto_ablkcipher_get_flags(cipher);
1851 ret = __des3_verify_key(&flags, key);
1852 if (unlikely(ret)) {
1853 crypto_ablkcipher_set_flags(cipher, flags);
1854 return ret;
1855 }
1856
1857 ctx->cipher_type = CIPHER_TYPE_3DES;
1858 } else {
1859 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1860 return -EINVAL;
1861 }
1862 return 0;
1863}
1864
1865static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1866 unsigned int keylen)
1867{
1868 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1869
1870 if (ctx->cipher.mode == CIPHER_MODE_XTS)
1871
1872 keylen = keylen / 2;
1873
1874 switch (keylen) {
1875 case AES_KEYSIZE_128:
1876 ctx->cipher_type = CIPHER_TYPE_AES128;
1877 break;
1878 case AES_KEYSIZE_192:
1879 ctx->cipher_type = CIPHER_TYPE_AES192;
1880 break;
1881 case AES_KEYSIZE_256:
1882 ctx->cipher_type = CIPHER_TYPE_AES256;
1883 break;
1884 default:
1885 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1886 return -EINVAL;
1887 }
1888 WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1889 ((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1890 return 0;
1891}
1892
1893static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1894 unsigned int keylen)
1895{
1896 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1897 int i;
1898
1899 ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
1900
1901 ctx->enckey[0] = 0x00;
1902 ctx->enckey[1] = 0x00;
1903 ctx->enckey[2] = 0x00;
1904 ctx->enckey[3] = 0x00;
1905 for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
1906 ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
1907
1908 ctx->cipher_type = CIPHER_TYPE_INIT;
1909
1910 return 0;
1911}
1912
/*
 * ablkcipher_setkey() - Program a cipher key: dispatch to the
 * algorithm-specific setkey, store the key in the context, and prebuild
 * the constant portion of the SPU request header for later requests.
 * @cipher: cipher tfm
 * @key:    key bytes
 * @keylen: key length in bytes
 *
 * Return: 0 on success; error from the algorithm-specific setkey otherwise
 */
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
	struct spu_cipher_parms cipher_parms;
	u32 alloc_len = 0;
	int err;

	flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
	flow_dump(" key: ", key, keylen);

	switch (ctx->cipher.alg) {
	case CIPHER_ALG_DES:
		err = des_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_3DES:
		err = threedes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_AES:
		err = aes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_RC4:
		err = rc4_setkey(cipher, key, keylen);
		break;
	default:
		pr_err("%s() Error: unknown cipher alg\n", __func__);
		err = -EINVAL;
	}
	if (err)
		return err;

	/* RC4 already stored its expanded key state in the ctx */
	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
		memcpy(ctx->enckey, key, keylen);
		ctx->enckeylen = keylen;
	}
	/* For XTS, store the two half-keys in the opposite order */
	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
		unsigned int xts_keylen = keylen / 2;

		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
	}

	/* Header allocation size depends on SPU hardware generation */
	if (spu->spu_type == SPU_TYPE_SPUM)
		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
	cipher_parms.iv_buf = NULL;
	cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;

	/* Prebuild the SPU request message header */
	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
	ctx->spu_req_hdr_len =
	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
				     &cipher_parms);

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);

	return 0;
}
1988
/* Encrypt entry point: queue the request for encryption */
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);

	return ablkcipher_enqueue(req, true);
}
1995
/* Decrypt entry point: queue the request for decryption */
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
	return ablkcipher_enqueue(req, false);
}
2001
/*
 * ahash_enqueue() - Set up the request context for a hash operation and
 * submit it to a SPU channel. A zero-length final hash on SPU2 is
 * computed synchronously in software instead.
 * @req: hash request
 *
 * Return: -EINPROGRESS if submitted to hardware; 0 if completed
 *	   synchronously (zero-length software hash, or data buffered in
 *	   the hash carry); < 0 on error
 */
static int ahash_enqueue(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int err = 0;
	const char *alg_name;

	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);

	/* Choose allocation flags based on whether we may sleep */
	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->ctx = ctx;
	rctx->bd_suppress = true;
	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));

	/* Initialize position in src scatterlist; hashes have no dst */
	rctx->src_sg = req->src;
	rctx->src_skip = 0;
	rctx->src_nents = 0;
	rctx->dst_sg = NULL;
	rctx->dst_skip = 0;
	rctx->dst_nents = 0;

	/* Zero-length final hash on SPU2 is handled in software */
	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		flow_log("Doing %sfinal %s zero-len hash request in software\n",
			 rctx->is_final ? "" : "non-", alg_name);
		err = do_shash((unsigned char *)alg_name, req->result,
			       NULL, 0, NULL, 0, ctx->authkey,
			       ctx->authkeylen);
		if (err < 0)
			flow_log("Hash request failed with error %d\n", err);
		return err;
	}
	/* Choose a SPU channel to process this request */
	rctx->chan_idx = select_channel();

	err = handle_ahash_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result; free chunk resources */
		spu_chunk_cleanup(rctx);

	if (err == -EAGAIN)
		/*
		 * Data was saved in the hash carry buffer; report
		 * success to the crypto API.
		 */
		err = 0;

	return err;
}
2057
/*
 * __ahash_init() - Initialize request context state for a SPU hash
 * operation.
 * @req: hash request
 *
 * Return: 0 always
 */
static int __ahash_init(struct ahash_request *req)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);

	flow_log("%s()\n", __func__);

	/* Reset incremental-hash bookkeeping */
	rctx->hash_carry_len = 0;
	rctx->is_final = 0;

	rctx->total_todo = 0;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;

	ctx->digestsize = crypto_ahash_digestsize(tfm);
	/* If a hash with a larger digest is ever added, catch it here */
	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);

	rctx->is_sw_hmac = false;

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
							  true);

	return 0;
}
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2102{
2103 struct spu_hw *spu = &iproc_priv.spu;
2104
2105 if (spu->spu_type == SPU_TYPE_SPU2)
2106 return true;
2107
2108 if ((ctx->auth.alg == HASH_ALG_AES) &&
2109 (ctx->auth.mode == HASH_MODE_XCBC))
2110 return true;
2111
2112
2113 return false;
2114}
2115
/*
 * ahash_init() - Hash init entry point. When the SPU cannot do
 * incremental hashing for this tfm, allocate and initialize a software
 * shash (freed later in ahash_final()/ahash_finup()); otherwise do the
 * normal SPU init.
 * @req: hash request
 *
 * Return: 0 on success; -ENOMEM or a shash error code otherwise
 */
static int ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	const char *alg_name;
	struct crypto_shash *hash;
	int ret;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		hash = crypto_alloc_shash(alg_name, 0, 0);
		if (IS_ERR(hash)) {
			ret = PTR_ERR(hash);
			goto err;
		}

		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		/* shash descriptor is allocated with trailing desc ctx */
		ctx->shash = kmalloc(sizeof(*ctx->shash) +
				     crypto_shash_descsize(hash), gfp);
		if (!ctx->shash) {
			ret = -ENOMEM;
			goto err_hash;
		}
		ctx->shash->tfm = hash;
		ctx->shash->flags = 0;

		/* Set the key using data we already have from setkey */
		if (ctx->authkeylen > 0) {
			ret = crypto_shash_setkey(hash, ctx->authkey,
						  ctx->authkeylen);
			if (ret)
				goto err_shash;
		}

		/* Initialize hash w/ this key and other params */
		ret = crypto_shash_init(ctx->shash);
		if (ret)
			goto err_shash;
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_init(req);
	}

	return ret;

err_shash:
	kfree(ctx->shash);
err_hash:
	crypto_free_shash(hash);
err:
	return ret;
}
2175
/*
 * __ahash_update() - Internal SPU hash update: add the new data length
 * to the running total and submit the request.
 * @req: hash request
 *
 * Return: 0 for a zero-length update; otherwise see ahash_enqueue()
 */
static int __ahash_update(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_update() nbytes:%u\n", req->nbytes);

	if (!req->nbytes)
		return 0;
	rctx->total_todo += req->nbytes;
	rctx->src_sent = 0;

	return ahash_enqueue(req);
}
2189
/*
 * ahash_update() - Hash update entry point. If the SPU cannot do
 * incremental hashing, copy the data out of the scatterlist and feed it
 * to the software shash allocated in ahash_init(); otherwise submit the
 * update to the SPU.
 * @req: hash request
 *
 * Return: 0 on success or data buffered; error code otherwise
 */
static int ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		if (req->src)
			nents = sg_nents(req->src);
		else
			return -EINVAL;

		/* Copy data from req scatterlist to a contiguous buffer */
		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf)
			return -ENOMEM;

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			kfree(tmpbuf);
			return -EINVAL;
		}

		/* Call synchronous update */
		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
		kfree(tmpbuf);
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_update(req);
	}

	return ret;
}
2233
/*
 * __ahash_final() - Internal SPU hash final: mark the request final and
 * submit it.
 * @req: hash request
 *
 * Return: see ahash_enqueue()
 */
static int __ahash_final(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_final() nbytes:%u\n", req->nbytes);

	rctx->is_final = 1;

	return ahash_enqueue(req);
}
2244
/*
 * ahash_final() - Hash final entry point. On the software-shash path,
 * finalize the digest and free the shash allocated in ahash_init();
 * otherwise submit the final to the SPU.
 * @req: hash request
 *
 * Return: 0 on success; error code otherwise
 */
static int ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int ret;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		ret = crypto_shash_final(ctx->shash, req->result);

		/* Done with hash, can deallocate it now */
		crypto_free_shash(ctx->shash->tfm);
		kfree(ctx->shash);

	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_final(req);
	}

	return ret;
}
2270
/*
 * __ahash_finup() - Internal SPU hash finup: add the final data length,
 * mark the request final, and submit it.
 * @req: hash request
 *
 * Return: see ahash_enqueue()
 */
static int __ahash_finup(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);

	rctx->total_todo += req->nbytes;
	rctx->src_sent = 0;
	rctx->is_final = 1;

	return ahash_enqueue(req);
}
2283
/*
 * ahash_finup() - Hash finup entry point. On the software-shash path,
 * copy the final data chunk out of the scatterlist, finish the hash,
 * then free the shash allocated in ahash_init() (on both success and
 * error exits). Otherwise submit the finup to the SPU.
 * @req: hash request
 *
 * Return: 0 on success; error code otherwise
 */
static int ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		if (req->src) {
			nents = sg_nents(req->src);
		} else {
			ret = -EINVAL;
			goto ahash_finup_exit;
		}

		/* Copy data from req scatterlist to a contiguous buffer */
		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf) {
			ret = -ENOMEM;
			goto ahash_finup_exit;
		}

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			ret = -EINVAL;
			goto ahash_finup_free;
		}

		/* Call synchronous update */
		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
					 req->result);
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		return __ahash_finup(req);
	}
ahash_finup_free:
	kfree(tmpbuf);

ahash_finup_exit:
	/* Done with the shash (allocated in ahash_init); free it */
	crypto_free_shash(ctx->shash->tfm);
	kfree(ctx->shash);
	return ret;
}
2337
2338static int ahash_digest(struct ahash_request *req)
2339{
2340 int err = 0;
2341
2342 flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2343
2344
2345 err = __ahash_init(req);
2346 if (!err)
2347 err = __ahash_finup(req);
2348
2349 return err;
2350}
2351
2352static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2353 unsigned int keylen)
2354{
2355 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2356
2357 flow_log("%s() ahash:%p key:%p keylen:%u\n",
2358 __func__, ahash, key, keylen);
2359 flow_dump(" key: ", key, keylen);
2360
2361 if (ctx->auth.alg == HASH_ALG_AES) {
2362 switch (keylen) {
2363 case AES_KEYSIZE_128:
2364 ctx->cipher_type = CIPHER_TYPE_AES128;
2365 break;
2366 case AES_KEYSIZE_192:
2367 ctx->cipher_type = CIPHER_TYPE_AES192;
2368 break;
2369 case AES_KEYSIZE_256:
2370 ctx->cipher_type = CIPHER_TYPE_AES256;
2371 break;
2372 default:
2373 pr_err("%s() Error: Invalid key length\n", __func__);
2374 return -EINVAL;
2375 }
2376 } else {
2377 pr_err("%s() Error: unknown hash alg\n", __func__);
2378 return -EINVAL;
2379 }
2380 memcpy(ctx->authkey, key, keylen);
2381 ctx->authkeylen = keylen;
2382
2383 return 0;
2384}
2385
2386static int ahash_export(struct ahash_request *req, void *out)
2387{
2388 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2389 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2390
2391 spu_exp->total_todo = rctx->total_todo;
2392 spu_exp->total_sent = rctx->total_sent;
2393 spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2394 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2395 spu_exp->hash_carry_len = rctx->hash_carry_len;
2396 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2397
2398 return 0;
2399}
2400
2401static int ahash_import(struct ahash_request *req, const void *in)
2402{
2403 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2404 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2405
2406 rctx->total_todo = spu_exp->total_todo;
2407 rctx->total_sent = spu_exp->total_sent;
2408 rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2409 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2410 rctx->hash_carry_len = spu_exp->hash_carry_len;
2411 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2412
2413 return 0;
2414}
2415
/*
 * ahash_hmac_setkey() - Set the key for an HMAC transform. A key longer
 * than the hash block size is first hashed down to the digest size, as
 * HMAC requires. On SPU-M, the ipad/opad blocks for software-assisted
 * HMAC are precomputed here.
 * @ahash:  hash tfm
 * @key:    key bytes
 * @keylen: key length in bytes
 *
 * Return: 0 on success; error code otherwise
 */
static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			     unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int index;
	int rc;

	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
		 __func__, ahash, key, keylen, blocksize, digestsize);
	flow_dump(" key: ", key, keylen);

	if (keylen > blocksize) {
		/* Hash the key with the matching software algorithm */
		switch (ctx->auth.alg) {
		case HASH_ALG_MD5:
			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA1:
			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA224:
			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA256:
			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA384:
			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA512:
			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA3_224:
			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		case HASH_ALG_SHA3_256:
			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		case HASH_ALG_SHA3_384:
			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		case HASH_ALG_SHA3_512:
			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		default:
			pr_err("%s() Error: unknown hash alg\n", __func__);
			return -EINVAL;
		}
		if (rc < 0) {
			pr_err("%s() Error %d computing shash for %s\n",
			       __func__, rc, hash_alg_name[ctx->auth.alg]);
			return rc;
		}
		ctx->authkeylen = digestsize;

		flow_log("  keylen > digestsize... hashed\n");
		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
	} else {
		memcpy(ctx->authkey, key, keylen);
		ctx->authkeylen = keylen;
	}

	/*
	 * On SPU-M, precompute the ipad and opad blocks (key padded to
	 * blocksize, XORed with the HMAC constants) so the inner and
	 * outer HMAC hashes can be driven as plain hashes. The raw key
	 * length is cleared since the key is now folded into ipad/opad.
	 */
	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
		memset(ctx->ipad + ctx->authkeylen, 0,
		       blocksize - ctx->authkeylen);
		ctx->authkeylen = 0;
		memcpy(ctx->opad, ctx->ipad, blocksize);

		for (index = 0; index < blocksize; index++) {
			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
			ctx->opad[index] ^= HMAC_OPAD_VALUE;
		}

		flow_dump("  ipad: ", ctx->ipad, blocksize);
		flow_dump("  opad: ", ctx->opad, blocksize);
	}
	ctx->digestsize = digestsize;
	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);

	return 0;
}
2515
2516static int ahash_hmac_init(struct ahash_request *req)
2517{
2518 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2519 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2520 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2521 unsigned int blocksize =
2522 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2523
2524 flow_log("ahash_hmac_init()\n");
2525
2526
2527 ahash_init(req);
2528
2529 if (!spu_no_incr_hash(ctx)) {
2530
2531 rctx->is_sw_hmac = true;
2532 ctx->auth.mode = HASH_MODE_HASH;
2533
2534 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2535 rctx->hash_carry_len = blocksize;
2536 rctx->total_todo += blocksize;
2537 }
2538
2539 return 0;
2540}
2541
/* HMAC update entry point; a zero-length update is a no-op */
static int ahash_hmac_update(struct ahash_request *req)
{
	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);

	if (!req->nbytes)
		return 0;

	return ahash_update(req);
}
2551
/* HMAC final entry point; delegates to the plain hash final */
static int ahash_hmac_final(struct ahash_request *req)
{
	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);

	return ahash_final(req);
}
2558
/* HMAC finup entry point; delegates to the plain hash finup */
static int ahash_hmac_finup(struct ahash_request *req)
{
	flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes);

	return ahash_finup(req);
}
2565
/*
 * ahash_hmac_digest() - Compute a complete HMAC in a single operation.
 * @req: hash request
 *
 * On SPU2, the hardware HMAC mode is used directly. On SPU-M, the HMAC
 * is driven as a plain hash with the precomputed ipad block prepended
 * (software-assisted HMAC).
 *
 * Return: see __ahash_finup()
 */
static int ahash_hmac_digest(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);

	/* Perform initialization and then call finup */
	__ahash_init(req);

	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
		/* SPU2: hardware performs the full HMAC construction */
		rctx->is_sw_hmac = false;
		ctx->auth.mode = HASH_MODE_HMAC;
	} else {
		/* SPU-M: drive the inner hash with a prepended ipad */
		rctx->is_sw_hmac = true;
		ctx->auth.mode = HASH_MODE_HASH;
		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
		rctx->hash_carry_len = blocksize;
		rctx->total_todo += blocksize;
	}

	return __ahash_finup(req);
}
2601
2602
2603
/*
 * aead_need_fallback() - Decide whether an AEAD request must be handled
 * by the software fallback cipher rather than the SPU hardware.
 * @req: AEAD request
 *
 * Return: nonzero if a fallback is required, 0 otherwise
 */
static int aead_need_fallback(struct aead_request *req)
{
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
	u32 payload_len;

	/*
	 * GCM/CCM with both zero-length AAD and zero-length payload is
	 * handled in software (on decrypt, cryptlen == digestsize means
	 * the payload is empty apart from the ICV).
	 */
	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
	    (req->assoclen == 0)) {
		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
			return 1;
		}
	}

	/* On SPU-M, only CCM digest sizes of 8, 12, or 16 are handled */
	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
	    (spu->spu_type == SPU_TYPE_SPUM) &&
	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
	    (ctx->digestsize != 16)) {
		flow_log("%s() AES CCM needs fallback for digest size %d\n",
			 __func__, ctx->digestsize);
		return 1;
	}

	/* CCM with zero-length AAD falls back on the SPUM_NSP subtype */
	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
	    (req->assoclen == 0)) {
		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
			 __func__);
		return 1;
	}

	/* On SPU-M, the AAD counts toward the payload limit */
	payload_len = req->cryptlen;
	if (spu->spu_type == SPU_TYPE_SPUM)
		payload_len += req->assoclen;

	flow_log("%s() payload len: %u\n", __func__, payload_len);

	/* Fall back when the request exceeds the SPU's max payload */
	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		return 0;
	else
		return payload_len > ctx->max_payload;
}
2659
/*
 * aead_complete() - Completion callback used when an AEAD request was
 * submitted to the software fallback cipher. Restores the original tfm
 * and the caller's completion callback/data (saved in aead_do_fallback())
 * before completing the request.
 * @areq: async crypto request that completed
 * @err:  completion status
 */
static void aead_complete(struct crypto_async_request *areq, int err)
{
	struct aead_request *req =
	    container_of(areq, struct aead_request, base);
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);

	flow_log("%s() err:%d\n", __func__, err);

	/* Restore the original tfm before completing to the caller */
	areq->tfm = crypto_aead_tfm(aead);

	areq->complete = rctx->old_complete;
	areq->data = rctx->old_data;

	areq->complete(areq, err);
}
2676
/*
 * aead_do_fallback() - Submit an AEAD request to the software fallback
 * cipher, temporarily swapping in the fallback tfm and a completion
 * callback (aead_complete) that restores the original tfm and callback
 * when the async operation finishes.
 * @req:        AEAD request
 * @is_encrypt: true to encrypt, false to decrypt
 *
 * Return: result of the fallback operation, or -EINVAL if no fallback
 *	   cipher is configured
 */
static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	int err;
	u32 req_flags;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (ctx->fallback_cipher) {
		/* Store the cipher tfm and then use the fallback tfm */
		rctx->old_tfm = tfm;
		aead_request_set_tfm(req, ctx->fallback_cipher);
		/*
		 * Save the callback and chain ourselves in, so we can
		 * restore the tfm when the request completes.
		 */
		rctx->old_complete = req->base.complete;
		rctx->old_data = req->base.data;
		req_flags = aead_request_flags(req);
		aead_request_set_callback(req, req_flags, aead_complete, req);
		err = is_encrypt ? crypto_aead_encrypt(req) :
		    crypto_aead_decrypt(req);

		if (err == 0) {
			/*
			 * fallback was synchronous (did not return
			 * -EINPROGRESS); restore request state here since
			 * aead_complete() will not run.
			 */
			aead_request_set_callback(req, req_flags,
						  rctx->old_complete, req);
			req->base.data = rctx->old_data;
			aead_request_set_tfm(req, aead);
			flow_log("%s() fallback completed successfully\n\n",
				 __func__);
		}
	} else {
		err = -EINVAL;
	}

	return err;
}
2721
/*
 * aead_enqueue() - Validate an AEAD request, initialize the per-request
 * context, and submit the request to the SPU hardware, or to the software
 * fallback when aead_need_fallback() says the hardware can't handle it.
 * @req:        AEAD request
 * @is_encrypt: true for encryption, false for decryption
 *
 * Return: -EINPROGRESS if the request was accepted for asynchronous
 *         processing; error status otherwise.
 */
static int aead_enqueue(struct aead_request *req, bool is_encrypt)
{
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
	int err;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (req->assoclen > MAX_ASSOC_SIZE) {
		pr_err
		    ("%s() Error: associated data too long. (%u > %u bytes)\n",
		     __func__, req->assoclen, MAX_ASSOC_SIZE);
		return -EINVAL;
	}

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->is_encrypt = is_encrypt;
	rctx->bd_suppress = false;
	rctx->total_todo = req->cryptlen;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;
	rctx->is_sw_hmac = false;
	rctx->ctx = ctx;
	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));

	/* assoc data is at start of src sg */
	rctx->assoc = req->src;

	/*
	 * Init current position in src scatterlist to be just past the assoc
	 * data. src_skip is the buffer offset where the payload begins, since
	 * assoc data may end in the middle of a scatterlist buffer.
	 */
	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
			     &rctx->src_skip) < 0) {
		pr_err("%s() Error: Unable to find start of src data\n",
		       __func__);
		return -EINVAL;
	}

	rctx->src_nents = 0;
	rctx->dst_nents = 0;
	if (req->dst == req->src) {
		rctx->dst_sg = rctx->src_sg;
		rctx->dst_skip = rctx->src_skip;
	} else {
		/*
		 * Expect req->dst to have room for assoc data followed by
		 * output data (and ICV, if encrypting). So initialize dst_sg
		 * to point past the assoclen offset, like src_sg above.
		 */
		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
				     &rctx->dst_skip) < 0) {
			pr_err("%s() Error: Unable to find start of dst data\n",
			       __func__);
			return -EINVAL;
		}
	}

	/*
	 * IV/counter length: modes with an explicit IV prepend any configured
	 * salt; CCM uses a fixed IV size; everything else carries no IV.
	 */
	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
	    ctx->cipher.mode == CIPHER_MODE_CTR ||
	    ctx->cipher.mode == CIPHER_MODE_OFB ||
	    ctx->cipher.mode == CIPHER_MODE_XTS ||
	    ctx->cipher.mode == CIPHER_MODE_GCM) {
		rctx->iv_ctr_len =
			ctx->salt_len +
			crypto_aead_ivsize(crypto_aead_reqtfm(req));
	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
	} else {
		rctx->iv_ctr_len = 0;
	}

	rctx->hash_carry_len = 0;

	flow_log("  src sg: %p\n", req->src);
	flow_log("  rctx->src_sg: %p, src_skip %u\n",
		 rctx->src_sg, rctx->src_skip);
	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
	flow_log("  dst sg: %p\n", req->dst);
	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
		 rctx->dst_sg, rctx->dst_skip);
	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
	flow_log("  authkeylen:%u\n", ctx->authkeylen);
	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("  max_payload infinite");
	else
		flow_log("  max_payload: %u\n", ctx->max_payload);

	if (unlikely(aead_need_fallback(req)))
		return aead_do_fallback(req, is_encrypt);

	/*
	 * Do memory allocations for request after fallback check, because if
	 * we do fallback, we won't call finish_req() to dealloc.
	 */
	if (rctx->iv_ctr_len) {
		/* Assemble iv_ctr as [salt | IV], honoring salt_offset */
		if (ctx->salt_len)
			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
			       ctx->salt, ctx->salt_len);
		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
		       req->iv,
		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
	}

	rctx->chan_idx = select_channel();
	err = handle_aead_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result */
		spu_chunk_cleanup(rctx);

	return err;
}
2842
2843static int aead_authenc_setkey(struct crypto_aead *cipher,
2844 const u8 *key, unsigned int keylen)
2845{
2846 struct spu_hw *spu = &iproc_priv.spu;
2847 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2848 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2849 struct rtattr *rta = (void *)key;
2850 struct crypto_authenc_key_param *param;
2851 const u8 *origkey = key;
2852 const unsigned int origkeylen = keylen;
2853
2854 int ret = 0;
2855
2856 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2857 keylen);
2858 flow_dump(" key: ", key, keylen);
2859
2860 if (!RTA_OK(rta, keylen))
2861 goto badkey;
2862 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
2863 goto badkey;
2864 if (RTA_PAYLOAD(rta) < sizeof(*param))
2865 goto badkey;
2866
2867 param = RTA_DATA(rta);
2868 ctx->enckeylen = be32_to_cpu(param->enckeylen);
2869
2870 key += RTA_ALIGN(rta->rta_len);
2871 keylen -= RTA_ALIGN(rta->rta_len);
2872
2873 if (keylen < ctx->enckeylen)
2874 goto badkey;
2875 if (ctx->enckeylen > MAX_KEY_SIZE)
2876 goto badkey;
2877
2878 ctx->authkeylen = keylen - ctx->enckeylen;
2879
2880 if (ctx->authkeylen > MAX_KEY_SIZE)
2881 goto badkey;
2882
2883 memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
2884
2885 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2886 memcpy(ctx->authkey, key, ctx->authkeylen);
2887
2888 switch (ctx->alg->cipher_info.alg) {
2889 case CIPHER_ALG_DES:
2890 if (ctx->enckeylen == DES_KEY_SIZE) {
2891 u32 tmp[DES_EXPKEY_WORDS];
2892 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2893
2894 if (des_ekey(tmp, key) == 0) {
2895 if (crypto_aead_get_flags(cipher) &
2896 CRYPTO_TFM_REQ_WEAK_KEY) {
2897 crypto_aead_set_flags(cipher, flags);
2898 return -EINVAL;
2899 }
2900 }
2901
2902 ctx->cipher_type = CIPHER_TYPE_DES;
2903 } else {
2904 goto badkey;
2905 }
2906 break;
2907 case CIPHER_ALG_3DES:
2908 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2909 u32 flags;
2910
2911 flags = crypto_aead_get_flags(cipher);
2912 ret = __des3_verify_key(&flags, ctx->enckey);
2913 if (unlikely(ret)) {
2914 crypto_aead_set_flags(cipher, flags);
2915 return ret;
2916 }
2917
2918 ctx->cipher_type = CIPHER_TYPE_3DES;
2919 } else {
2920 crypto_aead_set_flags(cipher,
2921 CRYPTO_TFM_RES_BAD_KEY_LEN);
2922 return -EINVAL;
2923 }
2924 break;
2925 case CIPHER_ALG_AES:
2926 switch (ctx->enckeylen) {
2927 case AES_KEYSIZE_128:
2928 ctx->cipher_type = CIPHER_TYPE_AES128;
2929 break;
2930 case AES_KEYSIZE_192:
2931 ctx->cipher_type = CIPHER_TYPE_AES192;
2932 break;
2933 case AES_KEYSIZE_256:
2934 ctx->cipher_type = CIPHER_TYPE_AES256;
2935 break;
2936 default:
2937 goto badkey;
2938 }
2939 break;
2940 case CIPHER_ALG_RC4:
2941 ctx->cipher_type = CIPHER_TYPE_INIT;
2942 break;
2943 default:
2944 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2945 return -EINVAL;
2946 }
2947
2948 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2949 ctx->authkeylen);
2950 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2951 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2952
2953
2954 if (ctx->fallback_cipher) {
2955 flow_log(" running fallback setkey()\n");
2956
2957 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2958 ctx->fallback_cipher->base.crt_flags |=
2959 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2960 ret =
2961 crypto_aead_setkey(ctx->fallback_cipher, origkey,
2962 origkeylen);
2963 if (ret) {
2964 flow_log(" fallback setkey() returned:%d\n", ret);
2965 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2966 tfm->crt_flags |=
2967 (ctx->fallback_cipher->base.crt_flags &
2968 CRYPTO_TFM_RES_MASK);
2969 }
2970 }
2971
2972 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2973 ctx->enckeylen,
2974 false);
2975
2976 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2977
2978 return ret;
2979
2980badkey:
2981 ctx->enckeylen = 0;
2982 ctx->authkeylen = 0;
2983 ctx->digestsize = 0;
2984
2985 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2986 return -EINVAL;
2987}
2988
/*
 * aead_gcm_ccm_setkey() - Set the AES key for a GCM or CCM AEAD transform.
 * @cipher: AEAD cipher transform
 * @key:    the AES key (salt, if any, already stripped by the ESP wrappers)
 * @keylen: length of @key in bytes
 *
 * Records the key and cipher type in the session context and propagates the
 * key to the fallback transform, if one exists.
 *
 * Return: 0 on success, -EINVAL on an unsupported key length.
 */
static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
			       const u8 *key, unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);

	int ret = 0;

	flow_log("%s() keylen:%u\n", __func__, keylen);
	flow_dump("  key: ", key, keylen);

	/* ESP wrappers set digestsize themselves; don't overwrite it here */
	if (!ctx->is_esp)
		ctx->digestsize = keylen;

	ctx->enckeylen = keylen;
	ctx->authkeylen = 0;

	switch (ctx->enckeylen) {
	case AES_KEYSIZE_128:
		ctx->cipher_type = CIPHER_TYPE_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->cipher_type = CIPHER_TYPE_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->cipher_type = CIPHER_TYPE_AES256;
		break;
	default:
		goto badkey;
	}

	memcpy(ctx->enckey, key, ctx->enckeylen);

	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
		 ctx->authkeylen);
	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);

	/*
	 * Propagate to the fallback. For ESP, the wrappers stripped the salt
	 * off the tail of the caller's buffer, so key[keylen .. keylen +
	 * salt_len) is still the salt; passing keylen + salt_len gives the
	 * fallback the original full key blob it expects.
	 */
	if (ctx->fallback_cipher) {
		flow_log("  running fallback setkey()\n");

		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->fallback_cipher->base.crt_flags |=
		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
					 keylen + ctx->salt_len);
		if (ret) {
			flow_log("  fallback setkey() returned:%d\n", ret);
			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
			tfm->crt_flags |=
			    (ctx->fallback_cipher->base.crt_flags &
			     CRYPTO_TFM_RES_MASK);
		}
	}

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);

	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
		 ctx->authkeylen);

	return ret;

badkey:
	ctx->enckeylen = 0;
	ctx->authkeylen = 0;
	ctx->digestsize = 0;

	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3078 const u8 *key, unsigned int keylen)
3079{
3080 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3081
3082 flow_log("%s\n", __func__);
3083
3084 if (keylen < GCM_ESP_SALT_SIZE)
3085 return -EINVAL;
3086
3087 ctx->salt_len = GCM_ESP_SALT_SIZE;
3088 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3089 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3090 keylen -= GCM_ESP_SALT_SIZE;
3091 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3092 ctx->is_esp = true;
3093 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3094
3095 return aead_gcm_ccm_setkey(cipher, key, keylen);
3096}
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3110 const u8 *key, unsigned int keylen)
3111{
3112 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3113
3114 flow_log("%s\n", __func__);
3115
3116 if (keylen < GCM_ESP_SALT_SIZE)
3117 return -EINVAL;
3118
3119 ctx->salt_len = GCM_ESP_SALT_SIZE;
3120 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3121 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3122 keylen -= GCM_ESP_SALT_SIZE;
3123 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3124 ctx->is_esp = true;
3125 ctx->is_rfc4543 = true;
3126 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3127
3128 return aead_gcm_ccm_setkey(cipher, key, keylen);
3129}
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3143 const u8 *key, unsigned int keylen)
3144{
3145 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3146
3147 flow_log("%s\n", __func__);
3148
3149 if (keylen < CCM_ESP_SALT_SIZE)
3150 return -EINVAL;
3151
3152 ctx->salt_len = CCM_ESP_SALT_SIZE;
3153 ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3154 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3155 keylen -= CCM_ESP_SALT_SIZE;
3156 ctx->is_esp = true;
3157 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3158
3159 return aead_gcm_ccm_setkey(cipher, key, keylen);
3160}
3161
3162static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3163{
3164 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3165 int ret = 0;
3166
3167 flow_log("%s() authkeylen:%u authsize:%u\n",
3168 __func__, ctx->authkeylen, authsize);
3169
3170 ctx->digestsize = authsize;
3171
3172
3173 if (ctx->fallback_cipher) {
3174 flow_log(" running fallback setauth()\n");
3175
3176 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3177 if (ret)
3178 flow_log(" fallback setauth() returned:%d\n", ret);
3179 }
3180
3181 return ret;
3182}
3183
/*
 * aead_encrypt() - AEAD encrypt entry point. Logs the request (cryptlen in
 * decimal and hex, src sg dump, assoclen) and enqueues it for encryption.
 */
static int aead_encrypt(struct aead_request *req)
{
	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
		 req->cryptlen);
	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
	flow_log("  assoc_len:%u\n", req->assoclen);

	return aead_enqueue(req, true);
}
3193
/*
 * aead_decrypt() - AEAD decrypt entry point. Logs the request and enqueues
 * it for decryption.
 */
static int aead_decrypt(struct aead_request *req)
{
	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
	flow_log("  assoc_len:%u\n", req->assoclen);

	return aead_enqueue(req, false);
}
3202
3203
3204
3205static struct iproc_alg_s driver_algs[] = {
3206 {
3207 .type = CRYPTO_ALG_TYPE_AEAD,
3208 .alg.aead = {
3209 .base = {
3210 .cra_name = "gcm(aes)",
3211 .cra_driver_name = "gcm-aes-iproc",
3212 .cra_blocksize = AES_BLOCK_SIZE,
3213 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3214 },
3215 .setkey = aead_gcm_ccm_setkey,
3216 .ivsize = GCM_AES_IV_SIZE,
3217 .maxauthsize = AES_BLOCK_SIZE,
3218 },
3219 .cipher_info = {
3220 .alg = CIPHER_ALG_AES,
3221 .mode = CIPHER_MODE_GCM,
3222 },
3223 .auth_info = {
3224 .alg = HASH_ALG_AES,
3225 .mode = HASH_MODE_GCM,
3226 },
3227 .auth_first = 0,
3228 },
3229 {
3230 .type = CRYPTO_ALG_TYPE_AEAD,
3231 .alg.aead = {
3232 .base = {
3233 .cra_name = "ccm(aes)",
3234 .cra_driver_name = "ccm-aes-iproc",
3235 .cra_blocksize = AES_BLOCK_SIZE,
3236 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3237 },
3238 .setkey = aead_gcm_ccm_setkey,
3239 .ivsize = CCM_AES_IV_SIZE,
3240 .maxauthsize = AES_BLOCK_SIZE,
3241 },
3242 .cipher_info = {
3243 .alg = CIPHER_ALG_AES,
3244 .mode = CIPHER_MODE_CCM,
3245 },
3246 .auth_info = {
3247 .alg = HASH_ALG_AES,
3248 .mode = HASH_MODE_CCM,
3249 },
3250 .auth_first = 0,
3251 },
3252 {
3253 .type = CRYPTO_ALG_TYPE_AEAD,
3254 .alg.aead = {
3255 .base = {
3256 .cra_name = "rfc4106(gcm(aes))",
3257 .cra_driver_name = "gcm-aes-esp-iproc",
3258 .cra_blocksize = AES_BLOCK_SIZE,
3259 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3260 },
3261 .setkey = aead_gcm_esp_setkey,
3262 .ivsize = GCM_RFC4106_IV_SIZE,
3263 .maxauthsize = AES_BLOCK_SIZE,
3264 },
3265 .cipher_info = {
3266 .alg = CIPHER_ALG_AES,
3267 .mode = CIPHER_MODE_GCM,
3268 },
3269 .auth_info = {
3270 .alg = HASH_ALG_AES,
3271 .mode = HASH_MODE_GCM,
3272 },
3273 .auth_first = 0,
3274 },
3275 {
3276 .type = CRYPTO_ALG_TYPE_AEAD,
3277 .alg.aead = {
3278 .base = {
3279 .cra_name = "rfc4309(ccm(aes))",
3280 .cra_driver_name = "ccm-aes-esp-iproc",
3281 .cra_blocksize = AES_BLOCK_SIZE,
3282 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3283 },
3284 .setkey = aead_ccm_esp_setkey,
3285 .ivsize = CCM_AES_IV_SIZE,
3286 .maxauthsize = AES_BLOCK_SIZE,
3287 },
3288 .cipher_info = {
3289 .alg = CIPHER_ALG_AES,
3290 .mode = CIPHER_MODE_CCM,
3291 },
3292 .auth_info = {
3293 .alg = HASH_ALG_AES,
3294 .mode = HASH_MODE_CCM,
3295 },
3296 .auth_first = 0,
3297 },
3298 {
3299 .type = CRYPTO_ALG_TYPE_AEAD,
3300 .alg.aead = {
3301 .base = {
3302 .cra_name = "rfc4543(gcm(aes))",
3303 .cra_driver_name = "gmac-aes-esp-iproc",
3304 .cra_blocksize = AES_BLOCK_SIZE,
3305 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3306 },
3307 .setkey = rfc4543_gcm_esp_setkey,
3308 .ivsize = GCM_RFC4106_IV_SIZE,
3309 .maxauthsize = AES_BLOCK_SIZE,
3310 },
3311 .cipher_info = {
3312 .alg = CIPHER_ALG_AES,
3313 .mode = CIPHER_MODE_GCM,
3314 },
3315 .auth_info = {
3316 .alg = HASH_ALG_AES,
3317 .mode = HASH_MODE_GCM,
3318 },
3319 .auth_first = 0,
3320 },
3321 {
3322 .type = CRYPTO_ALG_TYPE_AEAD,
3323 .alg.aead = {
3324 .base = {
3325 .cra_name = "authenc(hmac(md5),cbc(aes))",
3326 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3327 .cra_blocksize = AES_BLOCK_SIZE,
3328 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3329 },
3330 .setkey = aead_authenc_setkey,
3331 .ivsize = AES_BLOCK_SIZE,
3332 .maxauthsize = MD5_DIGEST_SIZE,
3333 },
3334 .cipher_info = {
3335 .alg = CIPHER_ALG_AES,
3336 .mode = CIPHER_MODE_CBC,
3337 },
3338 .auth_info = {
3339 .alg = HASH_ALG_MD5,
3340 .mode = HASH_MODE_HMAC,
3341 },
3342 .auth_first = 0,
3343 },
3344 {
3345 .type = CRYPTO_ALG_TYPE_AEAD,
3346 .alg.aead = {
3347 .base = {
3348 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3349 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3350 .cra_blocksize = AES_BLOCK_SIZE,
3351 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3352 },
3353 .setkey = aead_authenc_setkey,
3354 .ivsize = AES_BLOCK_SIZE,
3355 .maxauthsize = SHA1_DIGEST_SIZE,
3356 },
3357 .cipher_info = {
3358 .alg = CIPHER_ALG_AES,
3359 .mode = CIPHER_MODE_CBC,
3360 },
3361 .auth_info = {
3362 .alg = HASH_ALG_SHA1,
3363 .mode = HASH_MODE_HMAC,
3364 },
3365 .auth_first = 0,
3366 },
3367 {
3368 .type = CRYPTO_ALG_TYPE_AEAD,
3369 .alg.aead = {
3370 .base = {
3371 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3372 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3373 .cra_blocksize = AES_BLOCK_SIZE,
3374 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3375 },
3376 .setkey = aead_authenc_setkey,
3377 .ivsize = AES_BLOCK_SIZE,
3378 .maxauthsize = SHA256_DIGEST_SIZE,
3379 },
3380 .cipher_info = {
3381 .alg = CIPHER_ALG_AES,
3382 .mode = CIPHER_MODE_CBC,
3383 },
3384 .auth_info = {
3385 .alg = HASH_ALG_SHA256,
3386 .mode = HASH_MODE_HMAC,
3387 },
3388 .auth_first = 0,
3389 },
3390 {
3391 .type = CRYPTO_ALG_TYPE_AEAD,
3392 .alg.aead = {
3393 .base = {
3394 .cra_name = "authenc(hmac(md5),cbc(des))",
3395 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3396 .cra_blocksize = DES_BLOCK_SIZE,
3397 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3398 },
3399 .setkey = aead_authenc_setkey,
3400 .ivsize = DES_BLOCK_SIZE,
3401 .maxauthsize = MD5_DIGEST_SIZE,
3402 },
3403 .cipher_info = {
3404 .alg = CIPHER_ALG_DES,
3405 .mode = CIPHER_MODE_CBC,
3406 },
3407 .auth_info = {
3408 .alg = HASH_ALG_MD5,
3409 .mode = HASH_MODE_HMAC,
3410 },
3411 .auth_first = 0,
3412 },
3413 {
3414 .type = CRYPTO_ALG_TYPE_AEAD,
3415 .alg.aead = {
3416 .base = {
3417 .cra_name = "authenc(hmac(sha1),cbc(des))",
3418 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3419 .cra_blocksize = DES_BLOCK_SIZE,
3420 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3421 },
3422 .setkey = aead_authenc_setkey,
3423 .ivsize = DES_BLOCK_SIZE,
3424 .maxauthsize = SHA1_DIGEST_SIZE,
3425 },
3426 .cipher_info = {
3427 .alg = CIPHER_ALG_DES,
3428 .mode = CIPHER_MODE_CBC,
3429 },
3430 .auth_info = {
3431 .alg = HASH_ALG_SHA1,
3432 .mode = HASH_MODE_HMAC,
3433 },
3434 .auth_first = 0,
3435 },
3436 {
3437 .type = CRYPTO_ALG_TYPE_AEAD,
3438 .alg.aead = {
3439 .base = {
3440 .cra_name = "authenc(hmac(sha224),cbc(des))",
3441 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3442 .cra_blocksize = DES_BLOCK_SIZE,
3443 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3444 },
3445 .setkey = aead_authenc_setkey,
3446 .ivsize = DES_BLOCK_SIZE,
3447 .maxauthsize = SHA224_DIGEST_SIZE,
3448 },
3449 .cipher_info = {
3450 .alg = CIPHER_ALG_DES,
3451 .mode = CIPHER_MODE_CBC,
3452 },
3453 .auth_info = {
3454 .alg = HASH_ALG_SHA224,
3455 .mode = HASH_MODE_HMAC,
3456 },
3457 .auth_first = 0,
3458 },
3459 {
3460 .type = CRYPTO_ALG_TYPE_AEAD,
3461 .alg.aead = {
3462 .base = {
3463 .cra_name = "authenc(hmac(sha256),cbc(des))",
3464 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3465 .cra_blocksize = DES_BLOCK_SIZE,
3466 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3467 },
3468 .setkey = aead_authenc_setkey,
3469 .ivsize = DES_BLOCK_SIZE,
3470 .maxauthsize = SHA256_DIGEST_SIZE,
3471 },
3472 .cipher_info = {
3473 .alg = CIPHER_ALG_DES,
3474 .mode = CIPHER_MODE_CBC,
3475 },
3476 .auth_info = {
3477 .alg = HASH_ALG_SHA256,
3478 .mode = HASH_MODE_HMAC,
3479 },
3480 .auth_first = 0,
3481 },
3482 {
3483 .type = CRYPTO_ALG_TYPE_AEAD,
3484 .alg.aead = {
3485 .base = {
3486 .cra_name = "authenc(hmac(sha384),cbc(des))",
3487 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3488 .cra_blocksize = DES_BLOCK_SIZE,
3489 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3490 },
3491 .setkey = aead_authenc_setkey,
3492 .ivsize = DES_BLOCK_SIZE,
3493 .maxauthsize = SHA384_DIGEST_SIZE,
3494 },
3495 .cipher_info = {
3496 .alg = CIPHER_ALG_DES,
3497 .mode = CIPHER_MODE_CBC,
3498 },
3499 .auth_info = {
3500 .alg = HASH_ALG_SHA384,
3501 .mode = HASH_MODE_HMAC,
3502 },
3503 .auth_first = 0,
3504 },
3505 {
3506 .type = CRYPTO_ALG_TYPE_AEAD,
3507 .alg.aead = {
3508 .base = {
3509 .cra_name = "authenc(hmac(sha512),cbc(des))",
3510 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3511 .cra_blocksize = DES_BLOCK_SIZE,
3512 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3513 },
3514 .setkey = aead_authenc_setkey,
3515 .ivsize = DES_BLOCK_SIZE,
3516 .maxauthsize = SHA512_DIGEST_SIZE,
3517 },
3518 .cipher_info = {
3519 .alg = CIPHER_ALG_DES,
3520 .mode = CIPHER_MODE_CBC,
3521 },
3522 .auth_info = {
3523 .alg = HASH_ALG_SHA512,
3524 .mode = HASH_MODE_HMAC,
3525 },
3526 .auth_first = 0,
3527 },
3528 {
3529 .type = CRYPTO_ALG_TYPE_AEAD,
3530 .alg.aead = {
3531 .base = {
3532 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3533 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3534 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3535 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3536 },
3537 .setkey = aead_authenc_setkey,
3538 .ivsize = DES3_EDE_BLOCK_SIZE,
3539 .maxauthsize = MD5_DIGEST_SIZE,
3540 },
3541 .cipher_info = {
3542 .alg = CIPHER_ALG_3DES,
3543 .mode = CIPHER_MODE_CBC,
3544 },
3545 .auth_info = {
3546 .alg = HASH_ALG_MD5,
3547 .mode = HASH_MODE_HMAC,
3548 },
3549 .auth_first = 0,
3550 },
3551 {
3552 .type = CRYPTO_ALG_TYPE_AEAD,
3553 .alg.aead = {
3554 .base = {
3555 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3556 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3557 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3558 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3559 },
3560 .setkey = aead_authenc_setkey,
3561 .ivsize = DES3_EDE_BLOCK_SIZE,
3562 .maxauthsize = SHA1_DIGEST_SIZE,
3563 },
3564 .cipher_info = {
3565 .alg = CIPHER_ALG_3DES,
3566 .mode = CIPHER_MODE_CBC,
3567 },
3568 .auth_info = {
3569 .alg = HASH_ALG_SHA1,
3570 .mode = HASH_MODE_HMAC,
3571 },
3572 .auth_first = 0,
3573 },
3574 {
3575 .type = CRYPTO_ALG_TYPE_AEAD,
3576 .alg.aead = {
3577 .base = {
3578 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3579 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3580 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3581 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3582 },
3583 .setkey = aead_authenc_setkey,
3584 .ivsize = DES3_EDE_BLOCK_SIZE,
3585 .maxauthsize = SHA224_DIGEST_SIZE,
3586 },
3587 .cipher_info = {
3588 .alg = CIPHER_ALG_3DES,
3589 .mode = CIPHER_MODE_CBC,
3590 },
3591 .auth_info = {
3592 .alg = HASH_ALG_SHA224,
3593 .mode = HASH_MODE_HMAC,
3594 },
3595 .auth_first = 0,
3596 },
3597 {
3598 .type = CRYPTO_ALG_TYPE_AEAD,
3599 .alg.aead = {
3600 .base = {
3601 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3602 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3603 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3604 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3605 },
3606 .setkey = aead_authenc_setkey,
3607 .ivsize = DES3_EDE_BLOCK_SIZE,
3608 .maxauthsize = SHA256_DIGEST_SIZE,
3609 },
3610 .cipher_info = {
3611 .alg = CIPHER_ALG_3DES,
3612 .mode = CIPHER_MODE_CBC,
3613 },
3614 .auth_info = {
3615 .alg = HASH_ALG_SHA256,
3616 .mode = HASH_MODE_HMAC,
3617 },
3618 .auth_first = 0,
3619 },
3620 {
3621 .type = CRYPTO_ALG_TYPE_AEAD,
3622 .alg.aead = {
3623 .base = {
3624 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3625 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3626 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3627 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3628 },
3629 .setkey = aead_authenc_setkey,
3630 .ivsize = DES3_EDE_BLOCK_SIZE,
3631 .maxauthsize = SHA384_DIGEST_SIZE,
3632 },
3633 .cipher_info = {
3634 .alg = CIPHER_ALG_3DES,
3635 .mode = CIPHER_MODE_CBC,
3636 },
3637 .auth_info = {
3638 .alg = HASH_ALG_SHA384,
3639 .mode = HASH_MODE_HMAC,
3640 },
3641 .auth_first = 0,
3642 },
3643 {
3644 .type = CRYPTO_ALG_TYPE_AEAD,
3645 .alg.aead = {
3646 .base = {
3647 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3648 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3649 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3650 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3651 },
3652 .setkey = aead_authenc_setkey,
3653 .ivsize = DES3_EDE_BLOCK_SIZE,
3654 .maxauthsize = SHA512_DIGEST_SIZE,
3655 },
3656 .cipher_info = {
3657 .alg = CIPHER_ALG_3DES,
3658 .mode = CIPHER_MODE_CBC,
3659 },
3660 .auth_info = {
3661 .alg = HASH_ALG_SHA512,
3662 .mode = HASH_MODE_HMAC,
3663 },
3664 .auth_first = 0,
3665 },
3666
3667
3668 {
3669 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3670 .alg.crypto = {
3671 .cra_name = "ecb(arc4)",
3672 .cra_driver_name = "ecb-arc4-iproc",
3673 .cra_blocksize = ARC4_BLOCK_SIZE,
3674 .cra_ablkcipher = {
3675 .min_keysize = ARC4_MIN_KEY_SIZE,
3676 .max_keysize = ARC4_MAX_KEY_SIZE,
3677 .ivsize = 0,
3678 }
3679 },
3680 .cipher_info = {
3681 .alg = CIPHER_ALG_RC4,
3682 .mode = CIPHER_MODE_NONE,
3683 },
3684 .auth_info = {
3685 .alg = HASH_ALG_NONE,
3686 .mode = HASH_MODE_NONE,
3687 },
3688 },
3689 {
3690 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3691 .alg.crypto = {
3692 .cra_name = "ofb(des)",
3693 .cra_driver_name = "ofb-des-iproc",
3694 .cra_blocksize = DES_BLOCK_SIZE,
3695 .cra_ablkcipher = {
3696 .min_keysize = DES_KEY_SIZE,
3697 .max_keysize = DES_KEY_SIZE,
3698 .ivsize = DES_BLOCK_SIZE,
3699 }
3700 },
3701 .cipher_info = {
3702 .alg = CIPHER_ALG_DES,
3703 .mode = CIPHER_MODE_OFB,
3704 },
3705 .auth_info = {
3706 .alg = HASH_ALG_NONE,
3707 .mode = HASH_MODE_NONE,
3708 },
3709 },
3710 {
3711 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3712 .alg.crypto = {
3713 .cra_name = "cbc(des)",
3714 .cra_driver_name = "cbc-des-iproc",
3715 .cra_blocksize = DES_BLOCK_SIZE,
3716 .cra_ablkcipher = {
3717 .min_keysize = DES_KEY_SIZE,
3718 .max_keysize = DES_KEY_SIZE,
3719 .ivsize = DES_BLOCK_SIZE,
3720 }
3721 },
3722 .cipher_info = {
3723 .alg = CIPHER_ALG_DES,
3724 .mode = CIPHER_MODE_CBC,
3725 },
3726 .auth_info = {
3727 .alg = HASH_ALG_NONE,
3728 .mode = HASH_MODE_NONE,
3729 },
3730 },
3731 {
3732 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3733 .alg.crypto = {
3734 .cra_name = "ecb(des)",
3735 .cra_driver_name = "ecb-des-iproc",
3736 .cra_blocksize = DES_BLOCK_SIZE,
3737 .cra_ablkcipher = {
3738 .min_keysize = DES_KEY_SIZE,
3739 .max_keysize = DES_KEY_SIZE,
3740 .ivsize = 0,
3741 }
3742 },
3743 .cipher_info = {
3744 .alg = CIPHER_ALG_DES,
3745 .mode = CIPHER_MODE_ECB,
3746 },
3747 .auth_info = {
3748 .alg = HASH_ALG_NONE,
3749 .mode = HASH_MODE_NONE,
3750 },
3751 },
3752 {
3753 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3754 .alg.crypto = {
3755 .cra_name = "ofb(des3_ede)",
3756 .cra_driver_name = "ofb-des3-iproc",
3757 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3758 .cra_ablkcipher = {
3759 .min_keysize = DES3_EDE_KEY_SIZE,
3760 .max_keysize = DES3_EDE_KEY_SIZE,
3761 .ivsize = DES3_EDE_BLOCK_SIZE,
3762 }
3763 },
3764 .cipher_info = {
3765 .alg = CIPHER_ALG_3DES,
3766 .mode = CIPHER_MODE_OFB,
3767 },
3768 .auth_info = {
3769 .alg = HASH_ALG_NONE,
3770 .mode = HASH_MODE_NONE,
3771 },
3772 },
3773 {
3774 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3775 .alg.crypto = {
3776 .cra_name = "cbc(des3_ede)",
3777 .cra_driver_name = "cbc-des3-iproc",
3778 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3779 .cra_ablkcipher = {
3780 .min_keysize = DES3_EDE_KEY_SIZE,
3781 .max_keysize = DES3_EDE_KEY_SIZE,
3782 .ivsize = DES3_EDE_BLOCK_SIZE,
3783 }
3784 },
3785 .cipher_info = {
3786 .alg = CIPHER_ALG_3DES,
3787 .mode = CIPHER_MODE_CBC,
3788 },
3789 .auth_info = {
3790 .alg = HASH_ALG_NONE,
3791 .mode = HASH_MODE_NONE,
3792 },
3793 },
3794 {
3795 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3796 .alg.crypto = {
3797 .cra_name = "ecb(des3_ede)",
3798 .cra_driver_name = "ecb-des3-iproc",
3799 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3800 .cra_ablkcipher = {
3801 .min_keysize = DES3_EDE_KEY_SIZE,
3802 .max_keysize = DES3_EDE_KEY_SIZE,
3803 .ivsize = 0,
3804 }
3805 },
3806 .cipher_info = {
3807 .alg = CIPHER_ALG_3DES,
3808 .mode = CIPHER_MODE_ECB,
3809 },
3810 .auth_info = {
3811 .alg = HASH_ALG_NONE,
3812 .mode = HASH_MODE_NONE,
3813 },
3814 },
3815 {
3816 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3817 .alg.crypto = {
3818 .cra_name = "ofb(aes)",
3819 .cra_driver_name = "ofb-aes-iproc",
3820 .cra_blocksize = AES_BLOCK_SIZE,
3821 .cra_ablkcipher = {
3822 .min_keysize = AES_MIN_KEY_SIZE,
3823 .max_keysize = AES_MAX_KEY_SIZE,
3824 .ivsize = AES_BLOCK_SIZE,
3825 }
3826 },
3827 .cipher_info = {
3828 .alg = CIPHER_ALG_AES,
3829 .mode = CIPHER_MODE_OFB,
3830 },
3831 .auth_info = {
3832 .alg = HASH_ALG_NONE,
3833 .mode = HASH_MODE_NONE,
3834 },
3835 },
3836 {
3837 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3838 .alg.crypto = {
3839 .cra_name = "cbc(aes)",
3840 .cra_driver_name = "cbc-aes-iproc",
3841 .cra_blocksize = AES_BLOCK_SIZE,
3842 .cra_ablkcipher = {
3843 .min_keysize = AES_MIN_KEY_SIZE,
3844 .max_keysize = AES_MAX_KEY_SIZE,
3845 .ivsize = AES_BLOCK_SIZE,
3846 }
3847 },
3848 .cipher_info = {
3849 .alg = CIPHER_ALG_AES,
3850 .mode = CIPHER_MODE_CBC,
3851 },
3852 .auth_info = {
3853 .alg = HASH_ALG_NONE,
3854 .mode = HASH_MODE_NONE,
3855 },
3856 },
3857 {
3858 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3859 .alg.crypto = {
3860 .cra_name = "ecb(aes)",
3861 .cra_driver_name = "ecb-aes-iproc",
3862 .cra_blocksize = AES_BLOCK_SIZE,
3863 .cra_ablkcipher = {
3864 .min_keysize = AES_MIN_KEY_SIZE,
3865 .max_keysize = AES_MAX_KEY_SIZE,
3866 .ivsize = 0,
3867 }
3868 },
3869 .cipher_info = {
3870 .alg = CIPHER_ALG_AES,
3871 .mode = CIPHER_MODE_ECB,
3872 },
3873 .auth_info = {
3874 .alg = HASH_ALG_NONE,
3875 .mode = HASH_MODE_NONE,
3876 },
3877 },
3878 {
3879 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3880 .alg.crypto = {
3881 .cra_name = "ctr(aes)",
3882 .cra_driver_name = "ctr-aes-iproc",
3883 .cra_blocksize = AES_BLOCK_SIZE,
3884 .cra_ablkcipher = {
3885
3886 .min_keysize = AES_MIN_KEY_SIZE,
3887 .max_keysize = AES_MAX_KEY_SIZE,
3888 .ivsize = AES_BLOCK_SIZE,
3889 }
3890 },
3891 .cipher_info = {
3892 .alg = CIPHER_ALG_AES,
3893 .mode = CIPHER_MODE_CTR,
3894 },
3895 .auth_info = {
3896 .alg = HASH_ALG_NONE,
3897 .mode = HASH_MODE_NONE,
3898 },
3899 },
3900{
3901 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3902 .alg.crypto = {
3903 .cra_name = "xts(aes)",
3904 .cra_driver_name = "xts-aes-iproc",
3905 .cra_blocksize = AES_BLOCK_SIZE,
3906 .cra_ablkcipher = {
3907 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3908 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3909 .ivsize = AES_BLOCK_SIZE,
3910 }
3911 },
3912 .cipher_info = {
3913 .alg = CIPHER_ALG_AES,
3914 .mode = CIPHER_MODE_XTS,
3915 },
3916 .auth_info = {
3917 .alg = HASH_ALG_NONE,
3918 .mode = HASH_MODE_NONE,
3919 },
3920 },
3921
3922
3923 {
3924 .type = CRYPTO_ALG_TYPE_AHASH,
3925 .alg.hash = {
3926 .halg.digestsize = MD5_DIGEST_SIZE,
3927 .halg.base = {
3928 .cra_name = "md5",
3929 .cra_driver_name = "md5-iproc",
3930 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3931 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3932 CRYPTO_ALG_ASYNC,
3933 }
3934 },
3935 .cipher_info = {
3936 .alg = CIPHER_ALG_NONE,
3937 .mode = CIPHER_MODE_NONE,
3938 },
3939 .auth_info = {
3940 .alg = HASH_ALG_MD5,
3941 .mode = HASH_MODE_HASH,
3942 },
3943 },
3944 {
3945 .type = CRYPTO_ALG_TYPE_AHASH,
3946 .alg.hash = {
3947 .halg.digestsize = MD5_DIGEST_SIZE,
3948 .halg.base = {
3949 .cra_name = "hmac(md5)",
3950 .cra_driver_name = "hmac-md5-iproc",
3951 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3952 }
3953 },
3954 .cipher_info = {
3955 .alg = CIPHER_ALG_NONE,
3956 .mode = CIPHER_MODE_NONE,
3957 },
3958 .auth_info = {
3959 .alg = HASH_ALG_MD5,
3960 .mode = HASH_MODE_HMAC,
3961 },
3962 },
3963 {.type = CRYPTO_ALG_TYPE_AHASH,
3964 .alg.hash = {
3965 .halg.digestsize = SHA1_DIGEST_SIZE,
3966 .halg.base = {
3967 .cra_name = "sha1",
3968 .cra_driver_name = "sha1-iproc",
3969 .cra_blocksize = SHA1_BLOCK_SIZE,
3970 }
3971 },
3972 .cipher_info = {
3973 .alg = CIPHER_ALG_NONE,
3974 .mode = CIPHER_MODE_NONE,
3975 },
3976 .auth_info = {
3977 .alg = HASH_ALG_SHA1,
3978 .mode = HASH_MODE_HASH,
3979 },
3980 },
3981 {.type = CRYPTO_ALG_TYPE_AHASH,
3982 .alg.hash = {
3983 .halg.digestsize = SHA1_DIGEST_SIZE,
3984 .halg.base = {
3985 .cra_name = "hmac(sha1)",
3986 .cra_driver_name = "hmac-sha1-iproc",
3987 .cra_blocksize = SHA1_BLOCK_SIZE,
3988 }
3989 },
3990 .cipher_info = {
3991 .alg = CIPHER_ALG_NONE,
3992 .mode = CIPHER_MODE_NONE,
3993 },
3994 .auth_info = {
3995 .alg = HASH_ALG_SHA1,
3996 .mode = HASH_MODE_HMAC,
3997 },
3998 },
3999 {.type = CRYPTO_ALG_TYPE_AHASH,
4000 .alg.hash = {
4001 .halg.digestsize = SHA224_DIGEST_SIZE,
4002 .halg.base = {
4003 .cra_name = "sha224",
4004 .cra_driver_name = "sha224-iproc",
4005 .cra_blocksize = SHA224_BLOCK_SIZE,
4006 }
4007 },
4008 .cipher_info = {
4009 .alg = CIPHER_ALG_NONE,
4010 .mode = CIPHER_MODE_NONE,
4011 },
4012 .auth_info = {
4013 .alg = HASH_ALG_SHA224,
4014 .mode = HASH_MODE_HASH,
4015 },
4016 },
4017 {.type = CRYPTO_ALG_TYPE_AHASH,
4018 .alg.hash = {
4019 .halg.digestsize = SHA224_DIGEST_SIZE,
4020 .halg.base = {
4021 .cra_name = "hmac(sha224)",
4022 .cra_driver_name = "hmac-sha224-iproc",
4023 .cra_blocksize = SHA224_BLOCK_SIZE,
4024 }
4025 },
4026 .cipher_info = {
4027 .alg = CIPHER_ALG_NONE,
4028 .mode = CIPHER_MODE_NONE,
4029 },
4030 .auth_info = {
4031 .alg = HASH_ALG_SHA224,
4032 .mode = HASH_MODE_HMAC,
4033 },
4034 },
4035 {.type = CRYPTO_ALG_TYPE_AHASH,
4036 .alg.hash = {
4037 .halg.digestsize = SHA256_DIGEST_SIZE,
4038 .halg.base = {
4039 .cra_name = "sha256",
4040 .cra_driver_name = "sha256-iproc",
4041 .cra_blocksize = SHA256_BLOCK_SIZE,
4042 }
4043 },
4044 .cipher_info = {
4045 .alg = CIPHER_ALG_NONE,
4046 .mode = CIPHER_MODE_NONE,
4047 },
4048 .auth_info = {
4049 .alg = HASH_ALG_SHA256,
4050 .mode = HASH_MODE_HASH,
4051 },
4052 },
4053 {.type = CRYPTO_ALG_TYPE_AHASH,
4054 .alg.hash = {
4055 .halg.digestsize = SHA256_DIGEST_SIZE,
4056 .halg.base = {
4057 .cra_name = "hmac(sha256)",
4058 .cra_driver_name = "hmac-sha256-iproc",
4059 .cra_blocksize = SHA256_BLOCK_SIZE,
4060 }
4061 },
4062 .cipher_info = {
4063 .alg = CIPHER_ALG_NONE,
4064 .mode = CIPHER_MODE_NONE,
4065 },
4066 .auth_info = {
4067 .alg = HASH_ALG_SHA256,
4068 .mode = HASH_MODE_HMAC,
4069 },
4070 },
4071 {
4072 .type = CRYPTO_ALG_TYPE_AHASH,
4073 .alg.hash = {
4074 .halg.digestsize = SHA384_DIGEST_SIZE,
4075 .halg.base = {
4076 .cra_name = "sha384",
4077 .cra_driver_name = "sha384-iproc",
4078 .cra_blocksize = SHA384_BLOCK_SIZE,
4079 }
4080 },
4081 .cipher_info = {
4082 .alg = CIPHER_ALG_NONE,
4083 .mode = CIPHER_MODE_NONE,
4084 },
4085 .auth_info = {
4086 .alg = HASH_ALG_SHA384,
4087 .mode = HASH_MODE_HASH,
4088 },
4089 },
4090 {
4091 .type = CRYPTO_ALG_TYPE_AHASH,
4092 .alg.hash = {
4093 .halg.digestsize = SHA384_DIGEST_SIZE,
4094 .halg.base = {
4095 .cra_name = "hmac(sha384)",
4096 .cra_driver_name = "hmac-sha384-iproc",
4097 .cra_blocksize = SHA384_BLOCK_SIZE,
4098 }
4099 },
4100 .cipher_info = {
4101 .alg = CIPHER_ALG_NONE,
4102 .mode = CIPHER_MODE_NONE,
4103 },
4104 .auth_info = {
4105 .alg = HASH_ALG_SHA384,
4106 .mode = HASH_MODE_HMAC,
4107 },
4108 },
4109 {
4110 .type = CRYPTO_ALG_TYPE_AHASH,
4111 .alg.hash = {
4112 .halg.digestsize = SHA512_DIGEST_SIZE,
4113 .halg.base = {
4114 .cra_name = "sha512",
4115 .cra_driver_name = "sha512-iproc",
4116 .cra_blocksize = SHA512_BLOCK_SIZE,
4117 }
4118 },
4119 .cipher_info = {
4120 .alg = CIPHER_ALG_NONE,
4121 .mode = CIPHER_MODE_NONE,
4122 },
4123 .auth_info = {
4124 .alg = HASH_ALG_SHA512,
4125 .mode = HASH_MODE_HASH,
4126 },
4127 },
4128 {
4129 .type = CRYPTO_ALG_TYPE_AHASH,
4130 .alg.hash = {
4131 .halg.digestsize = SHA512_DIGEST_SIZE,
4132 .halg.base = {
4133 .cra_name = "hmac(sha512)",
4134 .cra_driver_name = "hmac-sha512-iproc",
4135 .cra_blocksize = SHA512_BLOCK_SIZE,
4136 }
4137 },
4138 .cipher_info = {
4139 .alg = CIPHER_ALG_NONE,
4140 .mode = CIPHER_MODE_NONE,
4141 },
4142 .auth_info = {
4143 .alg = HASH_ALG_SHA512,
4144 .mode = HASH_MODE_HMAC,
4145 },
4146 },
4147 {
4148 .type = CRYPTO_ALG_TYPE_AHASH,
4149 .alg.hash = {
4150 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4151 .halg.base = {
4152 .cra_name = "sha3-224",
4153 .cra_driver_name = "sha3-224-iproc",
4154 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4155 }
4156 },
4157 .cipher_info = {
4158 .alg = CIPHER_ALG_NONE,
4159 .mode = CIPHER_MODE_NONE,
4160 },
4161 .auth_info = {
4162 .alg = HASH_ALG_SHA3_224,
4163 .mode = HASH_MODE_HASH,
4164 },
4165 },
4166 {
4167 .type = CRYPTO_ALG_TYPE_AHASH,
4168 .alg.hash = {
4169 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4170 .halg.base = {
4171 .cra_name = "hmac(sha3-224)",
4172 .cra_driver_name = "hmac-sha3-224-iproc",
4173 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4174 }
4175 },
4176 .cipher_info = {
4177 .alg = CIPHER_ALG_NONE,
4178 .mode = CIPHER_MODE_NONE,
4179 },
4180 .auth_info = {
4181 .alg = HASH_ALG_SHA3_224,
4182 .mode = HASH_MODE_HMAC
4183 },
4184 },
4185 {
4186 .type = CRYPTO_ALG_TYPE_AHASH,
4187 .alg.hash = {
4188 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4189 .halg.base = {
4190 .cra_name = "sha3-256",
4191 .cra_driver_name = "sha3-256-iproc",
4192 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4193 }
4194 },
4195 .cipher_info = {
4196 .alg = CIPHER_ALG_NONE,
4197 .mode = CIPHER_MODE_NONE,
4198 },
4199 .auth_info = {
4200 .alg = HASH_ALG_SHA3_256,
4201 .mode = HASH_MODE_HASH,
4202 },
4203 },
4204 {
4205 .type = CRYPTO_ALG_TYPE_AHASH,
4206 .alg.hash = {
4207 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4208 .halg.base = {
4209 .cra_name = "hmac(sha3-256)",
4210 .cra_driver_name = "hmac-sha3-256-iproc",
4211 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4212 }
4213 },
4214 .cipher_info = {
4215 .alg = CIPHER_ALG_NONE,
4216 .mode = CIPHER_MODE_NONE,
4217 },
4218 .auth_info = {
4219 .alg = HASH_ALG_SHA3_256,
4220 .mode = HASH_MODE_HMAC,
4221 },
4222 },
4223 {
4224 .type = CRYPTO_ALG_TYPE_AHASH,
4225 .alg.hash = {
4226 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4227 .halg.base = {
4228 .cra_name = "sha3-384",
4229 .cra_driver_name = "sha3-384-iproc",
4230 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4231 }
4232 },
4233 .cipher_info = {
4234 .alg = CIPHER_ALG_NONE,
4235 .mode = CIPHER_MODE_NONE,
4236 },
4237 .auth_info = {
4238 .alg = HASH_ALG_SHA3_384,
4239 .mode = HASH_MODE_HASH,
4240 },
4241 },
4242 {
4243 .type = CRYPTO_ALG_TYPE_AHASH,
4244 .alg.hash = {
4245 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4246 .halg.base = {
4247 .cra_name = "hmac(sha3-384)",
4248 .cra_driver_name = "hmac-sha3-384-iproc",
4249 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4250 }
4251 },
4252 .cipher_info = {
4253 .alg = CIPHER_ALG_NONE,
4254 .mode = CIPHER_MODE_NONE,
4255 },
4256 .auth_info = {
4257 .alg = HASH_ALG_SHA3_384,
4258 .mode = HASH_MODE_HMAC,
4259 },
4260 },
4261 {
4262 .type = CRYPTO_ALG_TYPE_AHASH,
4263 .alg.hash = {
4264 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4265 .halg.base = {
4266 .cra_name = "sha3-512",
4267 .cra_driver_name = "sha3-512-iproc",
4268 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4269 }
4270 },
4271 .cipher_info = {
4272 .alg = CIPHER_ALG_NONE,
4273 .mode = CIPHER_MODE_NONE,
4274 },
4275 .auth_info = {
4276 .alg = HASH_ALG_SHA3_512,
4277 .mode = HASH_MODE_HASH,
4278 },
4279 },
4280 {
4281 .type = CRYPTO_ALG_TYPE_AHASH,
4282 .alg.hash = {
4283 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4284 .halg.base = {
4285 .cra_name = "hmac(sha3-512)",
4286 .cra_driver_name = "hmac-sha3-512-iproc",
4287 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4288 }
4289 },
4290 .cipher_info = {
4291 .alg = CIPHER_ALG_NONE,
4292 .mode = CIPHER_MODE_NONE,
4293 },
4294 .auth_info = {
4295 .alg = HASH_ALG_SHA3_512,
4296 .mode = HASH_MODE_HMAC,
4297 },
4298 },
4299 {
4300 .type = CRYPTO_ALG_TYPE_AHASH,
4301 .alg.hash = {
4302 .halg.digestsize = AES_BLOCK_SIZE,
4303 .halg.base = {
4304 .cra_name = "xcbc(aes)",
4305 .cra_driver_name = "xcbc-aes-iproc",
4306 .cra_blocksize = AES_BLOCK_SIZE,
4307 }
4308 },
4309 .cipher_info = {
4310 .alg = CIPHER_ALG_NONE,
4311 .mode = CIPHER_MODE_NONE,
4312 },
4313 .auth_info = {
4314 .alg = HASH_ALG_AES,
4315 .mode = HASH_MODE_XCBC,
4316 },
4317 },
4318 {
4319 .type = CRYPTO_ALG_TYPE_AHASH,
4320 .alg.hash = {
4321 .halg.digestsize = AES_BLOCK_SIZE,
4322 .halg.base = {
4323 .cra_name = "cmac(aes)",
4324 .cra_driver_name = "cmac-aes-iproc",
4325 .cra_blocksize = AES_BLOCK_SIZE,
4326 }
4327 },
4328 .cipher_info = {
4329 .alg = CIPHER_ALG_NONE,
4330 .mode = CIPHER_MODE_NONE,
4331 },
4332 .auth_info = {
4333 .alg = HASH_ALG_AES,
4334 .mode = HASH_MODE_CMAC,
4335 },
4336 },
4337};
4338
4339static int generic_cra_init(struct crypto_tfm *tfm,
4340 struct iproc_alg_s *cipher_alg)
4341{
4342 struct spu_hw *spu = &iproc_priv.spu;
4343 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4344 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4345
4346 flow_log("%s()\n", __func__);
4347
4348 ctx->alg = cipher_alg;
4349 ctx->cipher = cipher_alg->cipher_info;
4350 ctx->auth = cipher_alg->auth_info;
4351 ctx->auth_first = cipher_alg->auth_first;
4352 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4353 ctx->cipher.mode,
4354 blocksize);
4355 ctx->fallback_cipher = NULL;
4356
4357 ctx->enckeylen = 0;
4358 ctx->authkeylen = 0;
4359
4360 atomic_inc(&iproc_priv.stream_count);
4361 atomic_inc(&iproc_priv.session_count);
4362
4363 return 0;
4364}
4365
4366static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4367{
4368 struct crypto_alg *alg = tfm->__crt_alg;
4369 struct iproc_alg_s *cipher_alg;
4370
4371 flow_log("%s()\n", __func__);
4372
4373 tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4374
4375 cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4376 return generic_cra_init(tfm, cipher_alg);
4377}
4378
4379static int ahash_cra_init(struct crypto_tfm *tfm)
4380{
4381 int err;
4382 struct crypto_alg *alg = tfm->__crt_alg;
4383 struct iproc_alg_s *cipher_alg;
4384
4385 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4386 alg.hash);
4387
4388 err = generic_cra_init(tfm, cipher_alg);
4389 flow_log("%s()\n", __func__);
4390
4391
4392
4393
4394
4395 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4396 sizeof(struct iproc_reqctx_s));
4397
4398 return err;
4399}
4400
/*
 * tfm init for AEAD algorithms. Performs the common context init, then
 * sets the AEAD reqsize, generates a random per-tfm IV, and (when the
 * algorithm requests it) allocates a software fallback AEAD.
 */
static int aead_cra_init(struct crypto_aead *aead)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
						      alg.aead);

	int err = generic_cra_init(tfm, cipher_alg);

	flow_log("%s()\n", __func__);

	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
	ctx->is_esp = false;
	ctx->salt_len = 0;
	ctx->salt_offset = 0;

	/* Random IV is generated unconditionally, even if err != 0. */
	get_random_bytes(ctx->iv, MAX_IV_SIZE);
	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);

	if (!err) {
		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
			flow_log("%s() creating fallback cipher\n", __func__);

			ctx->fallback_cipher =
			    crypto_alloc_aead(alg->cra_name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
			if (IS_ERR(ctx->fallback_cipher)) {
				pr_err("%s() Error: failed to allocate fallback for %s\n",
				       __func__, alg->cra_name);
				/*
				 * NOTE(review): this early return leaves the
				 * counters bumped by generic_cra_init() in
				 * place — confirm whether the core calls the
				 * exit hook after a failed init.
				 */
				return PTR_ERR(ctx->fallback_cipher);
			}
		}
	}

	return err;
}
4441
/*
 * Common tfm teardown. Decrements session_count only; stream_count
 * (also incremented in generic_cra_init()) is never decremented —
 * presumably it is a cumulative statistic, not a live count.
 */
static void generic_cra_exit(struct crypto_tfm *tfm)
{
	atomic_dec(&iproc_priv.session_count);
}
4446
4447static void aead_cra_exit(struct crypto_aead *aead)
4448{
4449 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4450 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4451
4452 generic_cra_exit(tfm);
4453
4454 if (ctx->fallback_cipher) {
4455 crypto_free_aead(ctx->fallback_cipher);
4456 ctx->fallback_cipher = NULL;
4457 }
4458}
4459
4460
4461
4462
4463
4464
4465
4466
/*
 * Populate the spu_hw function-pointer table with either the SPU-M or
 * SPU2 implementations, based on the hardware type detected from the
 * device tree. For SPU-M, the max-payload callback additionally depends
 * on the subtype (NS2 vs NSP).
 */
static void spu_functions_register(struct device *dev,
				   enum spu_spu_type spu_type,
				   enum spu_spu_subtype spu_subtype)
{
	struct spu_hw *spu = &iproc_priv.spu;

	if (spu_type == SPU_TYPE_SPUM) {
		dev_dbg(dev, "Registering SPUM functions");
		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
		spu->spu_payload_length = spum_payload_length;
		spu->spu_response_hdr_len = spum_response_hdr_len;
		spu->spu_hash_pad_len = spum_hash_pad_len;
		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
		spu->spu_assoc_resp_len = spum_assoc_resp_len;
		spu->spu_aead_ivlen = spum_aead_ivlen;
		spu->spu_hash_type = spum_hash_type;
		spu->spu_digest_size = spum_digest_size;
		spu->spu_create_request = spum_create_request;
		spu->spu_cipher_req_init = spum_cipher_req_init;
		spu->spu_cipher_req_finish = spum_cipher_req_finish;
		spu->spu_request_pad = spum_request_pad;
		spu->spu_tx_status_len = spum_tx_status_len;
		spu->spu_rx_status_len = spum_rx_status_len;
		spu->spu_status_process = spum_status_process;
		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
		spu->spu_ccm_update_iv = spum_ccm_update_iv;
		spu->spu_wordalign_padlen = spum_wordalign_padlen;
		/* Max payload differs between the NS2 and NSP variants */
		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
		else
			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
	} else {
		dev_dbg(dev, "Registering SPU2 functions");
		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
		spu->spu_payload_length = spu2_payload_length;
		spu->spu_response_hdr_len = spu2_response_hdr_len;
		spu->spu_hash_pad_len = spu2_hash_pad_len;
		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
		spu->spu_aead_ivlen = spu2_aead_ivlen;
		spu->spu_hash_type = spu2_hash_type;
		spu->spu_digest_size = spu2_digest_size;
		spu->spu_create_request = spu2_create_request;
		spu->spu_cipher_req_init = spu2_cipher_req_init;
		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
		spu->spu_request_pad = spu2_request_pad;
		spu->spu_tx_status_len = spu2_tx_status_len;
		spu->spu_rx_status_len = spu2_rx_status_len;
		spu->spu_status_process = spu2_status_process;
		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
	}
}
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531static int spu_mb_init(struct device *dev)
4532{
4533 struct mbox_client *mcl = &iproc_priv.mcl;
4534 int err, i;
4535
4536 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4537 sizeof(struct mbox_chan *), GFP_KERNEL);
4538 if (!iproc_priv.mbox)
4539 return -ENOMEM;
4540
4541 mcl->dev = dev;
4542 mcl->tx_block = false;
4543 mcl->tx_tout = 0;
4544 mcl->knows_txdone = true;
4545 mcl->rx_callback = spu_rx_callback;
4546 mcl->tx_done = NULL;
4547
4548 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4549 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4550 if (IS_ERR(iproc_priv.mbox[i])) {
4551 err = (int)PTR_ERR(iproc_priv.mbox[i]);
4552 dev_err(dev,
4553 "Mbox channel %d request failed with err %d",
4554 i, err);
4555 iproc_priv.mbox[i] = NULL;
4556 goto free_channels;
4557 }
4558 }
4559
4560 return 0;
4561free_channels:
4562 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4563 if (iproc_priv.mbox[i])
4564 mbox_free_channel(iproc_priv.mbox[i]);
4565 }
4566
4567 return err;
4568}
4569
4570static void spu_mb_release(struct platform_device *pdev)
4571{
4572 int i;
4573
4574 for (i = 0; i < iproc_priv.spu.num_chan; i++)
4575 mbox_free_channel(iproc_priv.mbox[i]);
4576}
4577
4578static void spu_counters_init(void)
4579{
4580 int i;
4581 int j;
4582
4583 atomic_set(&iproc_priv.session_count, 0);
4584 atomic_set(&iproc_priv.stream_count, 0);
4585 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4586 atomic64_set(&iproc_priv.bytes_in, 0);
4587 atomic64_set(&iproc_priv.bytes_out, 0);
4588 for (i = 0; i < SPU_OP_NUM; i++) {
4589 atomic_set(&iproc_priv.op_counts[i], 0);
4590 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4591 }
4592 for (i = 0; i < CIPHER_ALG_LAST; i++)
4593 for (j = 0; j < CIPHER_MODE_LAST; j++)
4594 atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4595
4596 for (i = 0; i < HASH_ALG_LAST; i++) {
4597 atomic_set(&iproc_priv.hash_cnt[i], 0);
4598 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4599 }
4600 for (i = 0; i < AEAD_TYPE_LAST; i++)
4601 atomic_set(&iproc_priv.aead_cnt[i], 0);
4602
4603 atomic_set(&iproc_priv.mb_no_spc, 0);
4604 atomic_set(&iproc_priv.mb_send_fail, 0);
4605 atomic_set(&iproc_priv.bad_icv, 0);
4606}
4607
4608static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
4609{
4610 struct spu_hw *spu = &iproc_priv.spu;
4611 struct crypto_alg *crypto = &driver_alg->alg.crypto;
4612 int err;
4613
4614
4615 if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4616 (spu->spu_type == SPU_TYPE_SPU2))
4617 return 0;
4618
4619 crypto->cra_module = THIS_MODULE;
4620 crypto->cra_priority = cipher_pri;
4621 crypto->cra_alignmask = 0;
4622 crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4623 INIT_LIST_HEAD(&crypto->cra_list);
4624
4625 crypto->cra_init = ablkcipher_cra_init;
4626 crypto->cra_exit = generic_cra_exit;
4627 crypto->cra_type = &crypto_ablkcipher_type;
4628 crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4629 CRYPTO_ALG_KERN_DRIVER_ONLY;
4630
4631 crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4632 crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4633 crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4634
4635 err = crypto_register_alg(crypto);
4636
4637 if (err == 0)
4638 driver_alg->registered = true;
4639 pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
4640 return err;
4641}
4642
4643static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4644{
4645 struct spu_hw *spu = &iproc_priv.spu;
4646 struct ahash_alg *hash = &driver_alg->alg.hash;
4647 int err;
4648
4649
4650 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4651 (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4652 (spu->spu_type == SPU_TYPE_SPUM))
4653 return 0;
4654
4655
4656 if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4657 (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4658 return 0;
4659
4660 hash->halg.base.cra_module = THIS_MODULE;
4661 hash->halg.base.cra_priority = hash_pri;
4662 hash->halg.base.cra_alignmask = 0;
4663 hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4664 hash->halg.base.cra_init = ahash_cra_init;
4665 hash->halg.base.cra_exit = generic_cra_exit;
4666 hash->halg.base.cra_type = &crypto_ahash_type;
4667 hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
4668 hash->halg.statesize = sizeof(struct spu_hash_export_s);
4669
4670 if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4671 hash->setkey = ahash_setkey;
4672 hash->init = ahash_init;
4673 hash->update = ahash_update;
4674 hash->final = ahash_final;
4675 hash->finup = ahash_finup;
4676 hash->digest = ahash_digest;
4677 } else {
4678 hash->setkey = ahash_hmac_setkey;
4679 hash->init = ahash_hmac_init;
4680 hash->update = ahash_hmac_update;
4681 hash->final = ahash_hmac_final;
4682 hash->finup = ahash_hmac_finup;
4683 hash->digest = ahash_hmac_digest;
4684 }
4685 hash->export = ahash_export;
4686 hash->import = ahash_import;
4687
4688 err = crypto_register_ahash(hash);
4689
4690 if (err == 0)
4691 driver_alg->registered = true;
4692 pr_debug(" registered ahash %s\n",
4693 hash->halg.base.cra_driver_name);
4694 return err;
4695}
4696
4697static int spu_register_aead(struct iproc_alg_s *driver_alg)
4698{
4699 struct aead_alg *aead = &driver_alg->alg.aead;
4700 int err;
4701
4702 aead->base.cra_module = THIS_MODULE;
4703 aead->base.cra_priority = aead_pri;
4704 aead->base.cra_alignmask = 0;
4705 aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4706 INIT_LIST_HEAD(&aead->base.cra_list);
4707
4708 aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
4709
4710 aead->setauthsize = aead_setauthsize;
4711 aead->encrypt = aead_encrypt;
4712 aead->decrypt = aead_decrypt;
4713 aead->init = aead_cra_init;
4714 aead->exit = aead_cra_exit;
4715
4716 err = crypto_register_aead(aead);
4717
4718 if (err == 0)
4719 driver_alg->registered = true;
4720 pr_debug(" registered aead %s\n", aead->base.cra_driver_name);
4721 return err;
4722}
4723
4724
4725static int spu_algs_register(struct device *dev)
4726{
4727 int i, j;
4728 int err;
4729
4730 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4731 switch (driver_algs[i].type) {
4732 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4733 err = spu_register_ablkcipher(&driver_algs[i]);
4734 break;
4735 case CRYPTO_ALG_TYPE_AHASH:
4736 err = spu_register_ahash(&driver_algs[i]);
4737 break;
4738 case CRYPTO_ALG_TYPE_AEAD:
4739 err = spu_register_aead(&driver_algs[i]);
4740 break;
4741 default:
4742 dev_err(dev,
4743 "iproc-crypto: unknown alg type: %d",
4744 driver_algs[i].type);
4745 err = -EINVAL;
4746 }
4747
4748 if (err) {
4749 dev_err(dev, "alg registration failed with error %d\n",
4750 err);
4751 goto err_algs;
4752 }
4753 }
4754
4755 return 0;
4756
4757err_algs:
4758 for (j = 0; j < i; j++) {
4759
4760 if (!driver_algs[j].registered)
4761 continue;
4762 switch (driver_algs[j].type) {
4763 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4764 crypto_unregister_alg(&driver_algs[j].alg.crypto);
4765 driver_algs[j].registered = false;
4766 break;
4767 case CRYPTO_ALG_TYPE_AHASH:
4768 crypto_unregister_ahash(&driver_algs[j].alg.hash);
4769 driver_algs[j].registered = false;
4770 break;
4771 case CRYPTO_ALG_TYPE_AEAD:
4772 crypto_unregister_aead(&driver_algs[j].alg.aead);
4773 driver_algs[j].registered = false;
4774 break;
4775 }
4776 }
4777 return err;
4778}
4779
4780
4781
/* Hardware type/subtype pairs referenced from the OF match table below. */
static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};
4797
/*
 * Device-tree match table. The .data pointer carries the SPU hardware
 * type/subtype that spu_dt_read() retrieves via of_device_get_match_data().
 */
static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ /* sentinel */ }
};
4817
4818MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
4819
4820static int spu_dt_read(struct platform_device *pdev)
4821{
4822 struct device *dev = &pdev->dev;
4823 struct spu_hw *spu = &iproc_priv.spu;
4824 struct resource *spu_ctrl_regs;
4825 const struct spu_type_subtype *matched_spu_type;
4826 struct device_node *dn = pdev->dev.of_node;
4827 int err, i;
4828
4829
4830 spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
4831
4832 matched_spu_type = of_device_get_match_data(dev);
4833 if (!matched_spu_type) {
4834 dev_err(&pdev->dev, "Failed to match device\n");
4835 return -ENODEV;
4836 }
4837
4838 spu->spu_type = matched_spu_type->type;
4839 spu->spu_subtype = matched_spu_type->subtype;
4840
4841 i = 0;
4842 for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
4843 platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
4844
4845 spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
4846 if (IS_ERR(spu->reg_vbase[i])) {
4847 err = PTR_ERR(spu->reg_vbase[i]);
4848 dev_err(&pdev->dev, "Failed to map registers: %d\n",
4849 err);
4850 spu->reg_vbase[i] = NULL;
4851 return err;
4852 }
4853 }
4854 spu->num_spu = i;
4855 dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
4856
4857 return 0;
4858}
4859
4860int bcm_spu_probe(struct platform_device *pdev)
4861{
4862 struct device *dev = &pdev->dev;
4863 struct spu_hw *spu = &iproc_priv.spu;
4864 int err = 0;
4865
4866 iproc_priv.pdev = pdev;
4867 platform_set_drvdata(iproc_priv.pdev,
4868 &iproc_priv);
4869
4870 err = spu_dt_read(pdev);
4871 if (err < 0)
4872 goto failure;
4873
4874 err = spu_mb_init(&pdev->dev);
4875 if (err < 0)
4876 goto failure;
4877
4878 if (spu->spu_type == SPU_TYPE_SPUM)
4879 iproc_priv.bcm_hdr_len = 8;
4880 else if (spu->spu_type == SPU_TYPE_SPU2)
4881 iproc_priv.bcm_hdr_len = 0;
4882
4883 spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
4884
4885 spu_counters_init();
4886
4887 spu_setup_debugfs();
4888
4889 err = spu_algs_register(dev);
4890 if (err < 0)
4891 goto fail_reg;
4892
4893 return 0;
4894
4895fail_reg:
4896 spu_free_debugfs();
4897failure:
4898 spu_mb_release(pdev);
4899 dev_err(dev, "%s failed with error %d.\n", __func__, err);
4900
4901 return err;
4902}
4903
4904int bcm_spu_remove(struct platform_device *pdev)
4905{
4906 int i;
4907 struct device *dev = &pdev->dev;
4908 char *cdn;
4909
4910 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4911
4912
4913
4914
4915
4916 if (!driver_algs[i].registered)
4917 continue;
4918
4919 switch (driver_algs[i].type) {
4920 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4921 crypto_unregister_alg(&driver_algs[i].alg.crypto);
4922 dev_dbg(dev, " unregistered cipher %s\n",
4923 driver_algs[i].alg.crypto.cra_driver_name);
4924 driver_algs[i].registered = false;
4925 break;
4926 case CRYPTO_ALG_TYPE_AHASH:
4927 crypto_unregister_ahash(&driver_algs[i].alg.hash);
4928 cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4929 dev_dbg(dev, " unregistered hash %s\n", cdn);
4930 driver_algs[i].registered = false;
4931 break;
4932 case CRYPTO_ALG_TYPE_AEAD:
4933 crypto_unregister_aead(&driver_algs[i].alg.aead);
4934 dev_dbg(dev, " unregistered aead %s\n",
4935 driver_algs[i].alg.aead.base.cra_driver_name);
4936 driver_algs[i].registered = false;
4937 break;
4938 }
4939 }
4940 spu_free_debugfs();
4941 spu_mb_release(pdev);
4942 return 0;
4943}
4944
4945
4946
/* Platform driver glue; matched against the DT compatibles above. */
static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		   .name = "brcm-spu-crypto",
		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
		   },
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
4955module_platform_driver(bcm_spu_pdriver);
4956
4957MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4958MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4959MODULE_LICENSE("GPL v2");
4960