#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha3.h>

#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"

struct device_private iproc_priv;

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");

char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };

#define BCM_HDR_LEN iproc_priv.bcm_hdr_len

#define MBOX_SLEEP_MIN 800
#define MBOX_SLEEP_MAX 1000

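/*
 * select_channel() - Select a SPU channel to handle the next request, cycling
 * through the available mailbox channels round robin.
 *
 * Return: channel index
 */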
static u8 select_channel(void)
{
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}

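/*
 * spu_ablkcipher_rx_sg_create() - Build the receive scatterlist for the
 * mailbox message that catches the SPU response to one block cipher chunk:
 * response header, optional XTS tweak, destination data, optional RC4 SUPDT,
 * status padding, and the status word.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */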
static int
spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 rx_frag_num,
			    unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

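/*
 * spu_ablkcipher_tx_sg_create() - Build the transmit scatterlist carrying the
 * SPU request for one block cipher chunk: BCM/SPU header, optional XTS tweak,
 * source data, request padding, and (on SPU-M) a transmit status field.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */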
static int
spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}

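/*
 * mailbox_send_message() - Send a mailbox message to the SPU on the given
 * channel, retrying with a short sleep if the mailbox queue is full and the
 * request allows sleeping.
 *
 * Return: 0 on success, negative errno otherwise
 */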
static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	err = mssg->error;
	if (unlikely(err < 0))
		dev_err(dev, "message error %d", err);

	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}

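/*
 * handle_ablkcipher_req() - Submit the next chunk of a block cipher request
 * to the SPU as a single mailbox message, updating the IV/counter and request
 * indexes for chained modes as needed.
 *
 * Return: -EINPROGRESS if the chunk was submitted, negative errno on failure
 */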
static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req =
		container_of(areq, struct ablkcipher_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct spu_cipher_parms cipher_parms;
	int err = 0;
	unsigned int chunksize = 0;
	int remaining = 0;
	int chunk_start;
	u8 local_iv_ctr[MAX_IV_SIZE];
	u32 stat_pad_len;
	u32 pad_len;
	bool update_key = false;
	struct brcm_message *mssg;

	/* number of entries in src and dst sg in mailbox message */
	u8 rx_frag_num = 2;	/* response header and STATUS */
	u8 tx_frag_num = 1;	/* request header */

	flow_log("%s\n", __func__);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.iv_buf = local_iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	remaining = rctx->total_todo - chunk_start;

	/* determine the chunk we are breaking off and update the indexes */
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (remaining > ctx->max_payload))
		chunksize = ctx->max_payload;
	else
		chunksize = remaining;

	rctx->src_sent += chunksize;
	rctx->total_sent = rctx->src_sent;

	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);

	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
	    rctx->is_encrypt && chunk_start)
		/*
		 * Encrypting a non-first chunk: use the last ciphertext block
		 * of the previous chunk as the IV for this chunk.
		 */
		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
				    rctx->iv_ctr_len,
				    chunk_start - rctx->iv_ctr_len);

	if (rctx->iv_ctr_len) {
		/* get our local copy of the iv */
		memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
		       rctx->iv_ctr_len);

		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
		    !rctx->is_encrypt) {
			/*
			 * CBC decrypt: next IV is the last ciphertext block
			 * in this chunk.
			 */
			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
					    rctx->iv_ctr_len,
					    rctx->src_sent - rctx->iv_ctr_len);
		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
			/*
			 * SPU hardware increments the counter once per 16-byte
			 * AES block, so advance our copy by the number of
			 * blocks in this chunk for use with the next chunk.
			 */
			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
		}
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
		rx_frag_num++;
		if (chunk_start) {
			/*
			 * Non-first RC4 chunk: use SUPDT from the previous
			 * response as the key for this chunk.
			 */
			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_UPDT;
		} else if (!rctx->is_encrypt) {
			/*
			 * First RC4 decrypt chunk: have the SPU expand the key
			 * and return it in SUPDT for use with later chunks.
			 */
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_INIT;
		}
	}

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("sent:%u start:%u remains:%u size:%u\n",
		 rctx->src_sent, chunk_start, remaining, chunksize);

	/* Copy SPU header template created at setkey time */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));

	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
				   &cipher_parms, update_key, chunksize);

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
				     0, ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      ctx->spu_req_hdr_len);
	packet_log("payload:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch the response message.
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		rx_frag_num++;

	err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
					  stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		tx_frag_num++;

	err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
					  pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

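/*
 * handle_ablkcipher_resp() - Process the SPU response to one block cipher
 * chunk: account for the returned payload and bump the operation counters
 * once the whole request has been received.
 */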
static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
#endif
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;

	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload() &&
	    (payload_len >= SPU_XTS_TWEAK_SIZE))
		payload_len -= SPU_XTS_TWEAK_SIZE;

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	flow_log("%s() offset: %u, bd_len: %u BD:\n",
		 __func__, rctx->total_received, payload_len);

	dump_sg(req->dst, rctx->total_received, payload_len);
	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
			    SPU_SUPDT_LEN);

	rctx->total_received += payload_len;
	if (rctx->total_received == rctx->total_todo) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
		atomic_inc(
		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
	}
}

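/*
 * spu_ahash_rx_sg_create() - Build the receive scatterlist for an ahash SPU
 * response: response header, digest, optional status padding, and the status
 * word.
 *
 * Return: 0 on success, -ENOMEM on failure
 */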
static int
spu_ahash_rx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 rx_frag_num, unsigned int digestsize,
		       u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;
	struct iproc_ctx_s *ctx = rctx->ctx;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
	return 0;
}

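/*
 * spu_ahash_tx_sg_create() - Build the transmit scatterlist for an ahash SPU
 * request: BCM/SPU header, carried-over partial block, new request data,
 * padding, and (on SPU-M) a transmit status field.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */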
static int
spu_ahash_tx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 tx_frag_num,
		       u32 spu_hdr_len,
		       unsigned int hash_carry_len,
		       unsigned int new_data_len, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;
	u32 datalen;
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (hash_carry_len)
		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);

	if (new_data_len) {
		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, new_data_len);
		if (datalen < new_data_len) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}

	return 0;
}

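/*
 * handle_ahash_req() - Submit the next chunk of an ahash request to the SPU.
 * On a non-final request, data that does not fill a whole block is stashed in
 * the hash carry buffer and submitted with the next chunk.
 *
 * Return: -EINPROGRESS if a chunk was submitted, -EAGAIN if all data was
 * stashed in the hash carry, negative errno on failure
 */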
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	unsigned int nbytes_to_hash = 0;
	int err = 0;
	unsigned int chunksize = 0;
	unsigned int new_data_len;

	unsigned int chunk_start = 0;
	u32 db_size;
	int pad_len = 0;
	u32 data_pad_len = 0;
	u32 stat_pad_len = 0;
	struct brcm_message *mssg;
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;

	/* number of sg entries; rx always catches digest and STATUS */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;

	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;

	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * Non-final request: only submit whole blocks to the hardware and
	 * carry any remainder over to the next chunk.
	 */
	if (!rctx->is_final) {
		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
		u16 new_len;

		rem = chunksize % blocksize;
		if (rem) {
			chunksize -= rem;
			if (chunksize == 0) {
				/* Don't have a full block to submit to hw */
				new_len = rem - rctx->hash_carry_len;
				sg_copy_part_to_buf(req->src, dest, new_len,
						    rctx->src_sent);
				rctx->hash_carry_len = rem;
				flow_log("Exiting with hash carry len: %u\n",
					 rctx->hash_carry_len);
				packet_dump(" buf: ",
					    rctx->hash_carry,
					    rctx->hash_carry_len);
				return -EAGAIN;
			}
		}
	}

	/* If we have hash carry, prefix it to the data in this request */
	local_nbuf = rctx->hash_carry_len;
	rctx->hash_carry_len = 0;
	if (local_nbuf)
		tx_frag_num++;
	new_data_len = chunksize - local_nbuf;

	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
				       new_data_len);

	if (hash_parms.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)cipher_parms.type;
	else
		hash_parms.type = spu->spu_hash_type(rctx->total_sent);

	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
					  hash_parms.type);
	hash_parms.digestsize = digestsize;

	/* update the indexes */
	rctx->total_sent += chunksize;
	rctx->src_sent += new_data_len;

	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
							   hash_parms.mode,
							   chunksize,
							   blocksize);

	/*
	 * For a non-first chunk, pass the previous digest back in so the
	 * hardware can continue from it (except for AES hash types).
	 */
	if ((hash_parms.type == HASH_TYPE_UPDT) &&
	    (hash_parms.alg != HASH_ALG_AES)) {
		hash_parms.key_buf = rctx->incr_hash;
		hash_parms.key_len = digestsize;
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s() final: %u nbuf: %u ",
		 __func__, rctx->is_final, local_nbuf);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);

	/* Prepend SPU header with BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	hash_parms.prebuf_len = local_nbuf;
	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN,
					      &req_opts, &cipher_parms,
					      &hash_parms, &aead_parms,
					      new_data_len);

	if (spu_hdr_len == 0) {
		pr_err("Failed to create SPU request header\n");
		return -EFAULT;
	}

	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
				   0, 0, hash_parms.pad_len);
	if (spu->spu_tx_status_len())
		stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
				     hash_parms.pad_len, ctx->auth.alg,
				     ctx->auth.mode, rctx->total_sent,
				     stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
	flow_log("Data:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch the response message.
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;

	/* Create rx scatterlist to catch result */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

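/*
 * spu_hmac_outer_hash() - Compute the outer hash of an HMAC in software by
 * hashing the opad block followed by the inner digest.
 *
 * Return: 0 on success, negative errno otherwise
 */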
static int spu_hmac_outer_hash(struct ahash_request *req,
			       struct iproc_ctx_s *ctx)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	int rc;

	switch (ctx->auth.alg) {
	case HASH_ALG_MD5:
		rc = do_shash("md5", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA1:
		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA224:
		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA256:
		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA384:
		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA512:
		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	default:
		pr_err("%s() Error: unknown hmac type\n", __func__);
		rc = -EINVAL;
	}
	return rc;
}

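/*
 * ahash_req_done() - Finish an ahash request: copy the digest to the request
 * result, byte swap MD5 output from SPU-M, run the software outer HMAC hash
 * when needed, and update statistics.
 *
 * Return: 0 on success, negative errno otherwise
 */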
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump(" digest ", req->result, ctx->digestsize);

	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump(" hmac: ", req->result, ctx->digestsize);
	}

	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}

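/*
 * handle_ahash_resp() - Process the SPU response to one ahash chunk. Saves
 * the returned digest as input for the next incremental chunk and completes
 * the request when the final chunk has been processed.
 */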
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
#endif
	/*
	 * Save the returned hash for use as input to the next incremental
	 * chunk. Copying MAX_DIGEST_SIZE may copy more than needed, but is
	 * simpler than computing the actual digest size here.
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}

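/*
 * spu_aead_rx_sg_create() - Build the receive scatterlist for an AEAD SPU
 * response: response header, associated data, output data, GCM/CCM padding,
 * ICV, status padding, and the status word.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */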
static int spu_aead_rx_sg_create(struct brcm_message *mssg,
				 struct aead_request *req,
				 struct iproc_reqctx_s *rctx,
				 u8 rx_frag_num,
				 unsigned int assoc_len,
				 u32 ret_iv_len, unsigned int resp_len,
				 unsigned int digestsize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;
	u32 assoc_buf_len;
	u8 data_padlen = 0;

	if (ctx->is_rfc4543) {
		/* RFC4543: pad is computed over assoc data plus output data */
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       assoc_len + resp_len);
		assoc_buf_len = assoc_len;
	} else {
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       resp_len);
		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
						assoc_len, ret_iv_len,
						rctx->is_encrypt);
	}

	if (ctx->cipher.mode == CIPHER_MODE_CCM)
		/* For CCM, the ICV must start on a 32-bit aligned boundary */
		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
							 resp_len +
							 data_padlen);

	if (data_padlen)
		/* Catch the GCM/CCM pad in a separate buffer */
		rx_frag_num++;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	if (assoc_buf_len) {
		/*
		 * Don't write directly to req->dst, because the SPU may pad
		 * the assoc data in the response.
		 */
		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
	}

	if (resp_len) {
		/*
		 * The dst sg catches just the data; the digest is caught in a
		 * separate buffer.
		 */
		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
					 rctx->dst_nents, resp_len);
		if (datalen < (resp_len)) {
			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
			       __func__, resp_len, datalen);
			return -EFAULT;
		}
	}

	if (data_padlen) {
		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
	}

	/* Always catch the ICV in a separate buffer */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	flow_log("stat_pad_len %u\n", stat_pad_len);
	if (stat_pad_len) {
		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
	}

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

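/*
 * spu_aead_tx_sg_create() - Build the transmit scatterlist for an AEAD SPU
 * request: BCM/SPU header, associated data, IV, AAD padding, data, request
 * padding, optional ICV, and (on SPU-M) a transmit status field.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */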
static int spu_aead_tx_sg_create(struct brcm_message *mssg,
				 struct iproc_reqctx_s *rctx,
				 u8 tx_frag_num,
				 u32 spu_hdr_len,
				 struct scatterlist *assoc,
				 unsigned int assoc_len,
				 int assoc_nents,
				 unsigned int aead_iv_len,
				 unsigned int chunksize,
				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;
	struct scatterlist *assoc_sg = assoc;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;
	u32 written;
	u32 assoc_offset = 0;
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (assoc_len) {
		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
					 assoc_nents, assoc_len);
		if (written < assoc_len) {
			pr_err("%s(): failed to copy assoc sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (aead_iv_len)
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);

	if (aad_pad_len) {
		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
	}

	datalen = chunksize;
	if ((chunksize > ctx->digestsize) && incl_icv)
		datalen -= ctx->digestsize;
	if (datalen) {
		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, datalen);
		if (written < datalen) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len) {
		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
	}

	if (incl_icv)
		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}

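/*
 * handle_aead_req() - Submit an entire AEAD request to the SPU in a single
 * mailbox message, handling the ESP, CCM, and RFC4543 special cases for
 * associated data and padding.
 *
 * Return: -EINPROGRESS if the request was submitted, negative errno on failure
 */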
static int handle_aead_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;
	unsigned int chunksize;
	unsigned int resp_len;
	u32 spu_hdr_len;
	u32 db_size;
	u32 stat_pad_len;
	u32 pad_len;
	struct brcm_message *mssg;
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	int assoc_nents = 0;
	bool incl_icv = false;
	unsigned int digestsize = ctx->digestsize;

	/* number of sg entries; rx always includes header and STATUS */
	u8 rx_frag_num = 2;
	u8 tx_frag_num = 1;

	/* The entire AEAD request is processed as one chunk */
	chunksize = rctx->total_todo;

	flow_log("%s: chunksize %u\n", __func__, chunksize);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.is_inbound = !(rctx->is_encrypt);
	req_opts.auth_first = ctx->auth_first;
	req_opts.is_aead = true;
	req_opts.is_esp = ctx->is_esp;

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;
	hash_parms.digestsize = digestsize;

	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
		hash_parms.key_len = SHA224_DIGEST_SIZE;

	aead_parms.assoc_size = req->assoclen;
	if (ctx->is_esp && !ctx->is_rfc4543) {
		/*
		 * ESP: the 8-byte IV is included in the assoc data of the
		 * request, but the SPU expects AAD without it, so subtract
		 * the IV length.
		 */
		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;

		if (rctx->is_encrypt) {
			aead_parms.return_iv = true;
			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
		}
	} else {
		aead_parms.ret_iv_len = 0;
	}

	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
	if (aead_parms.assoc_size)
		assoc_nents = spu_sg_count(rctx->assoc, 0,
					   aead_parms.assoc_size);

	mssg = &rctx->mb_mssg;

	rctx->total_sent = chunksize;
	rctx->src_sent = chunksize;
	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len,
				    rctx->is_encrypt))
		rx_frag_num++;

	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
						rctx->iv_ctr_len);

	if (ctx->auth.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)ctx->cipher_type;

	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						 aead_parms.assoc_size);

	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							   chunksize);

	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		/* For CCM, AAD padding is computed over assoc_size + 2 */
		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
					 ctx->cipher.mode,
					 aead_parms.assoc_size + 2);

		/*
		 * CCM decrypt: the ICV appended to the chunk is not included
		 * when computing the data padding.
		 */
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len =
				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							chunksize - digestsize);

		/* CCM also requires software to rewrite portions of the IV */
		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
				       chunksize, rctx->is_encrypt,
				       ctx->is_esp);
	}

	if (ctx->is_rfc4543) {
		/*
		 * RFC4543: data is included in the AAD, so don't pad after
		 * the AAD alone; pad based on AAD plus data size.
		 */
		aead_parms.aad_pad_len = 0;
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize -
					digestsize);
		else
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize);

		req_opts.is_rfc4543 = true;
	}

	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
		incl_icv = true;
		tx_frag_num++;
		/* Copy the ICV from the end of the src sg to the digest buf */
		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
				    req->assoclen + rctx->total_sent -
				    digestsize);
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);

	/* Prepend SPU header with BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN, &req_opts,
					      &cipher_parms, &hash_parms,
					      &aead_parms, chunksize);

	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
				   chunksize, aead_parms.aad_pad_len,
				   aead_parms.data_pad_len, 0);

	stat_pad_len = spu->spu_wordalign_padlen(db_size);

	if (stat_pad_len)
		rx_frag_num++;
	pad_len = aead_parms.data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
				     aead_parms.data_pad_len, 0,
				     ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
	packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
	packet_log("BD:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch the response message.
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;
	resp_len = chunksize;

	/* The ICV is always caught in its own buffer */
	rx_frag_num++;

	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
		/* Input is ciphertext plus ICV, but the ICV is not output */
		resp_len -= ctx->digestsize;
		if (resp_len == 0)
			/* no rx frags needed to catch output data */
			rx_frag_num -= rctx->dst_nents;
	}

	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len, resp_len, digestsize,
				    stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	tx_frag_num += assoc_nents;
	if (aead_parms.aad_pad_len)
		tx_frag_num++;
	if (aead_parms.iv_len)
		tx_frag_num++;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				    rctx->assoc, aead_parms.assoc_size,
				    assoc_nents, aead_parms.iv_len, chunksize,
				    aead_parms.aad_pad_len, pad_len, incl_icv);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

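/*
 * handle_aead_resp() - Process the SPU response to an AEAD request: copy the
 * ICV back to the destination scatterlist on encryption and update statistics.
 */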
static void handle_aead_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;
	unsigned int icv_offset;
	u32 result_len;

	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
	flow_log("payload_len %u\n", payload_len);

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	if (req->assoclen)
		packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
			    req->assoclen);

	result_len = req->cryptlen;
	if (rctx->is_encrypt) {
		icv_offset = req->assoclen + rctx->total_sent;
		packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
				      ctx->digestsize, icv_offset);
		result_len += ctx->digestsize;
	}

	packet_log("response data: ");
	dump_sg(req->dst, req->assoclen, result_len);

	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
	if (ctx->cipher.alg == CIPHER_ALG_AES) {
		if (ctx->cipher.mode == CIPHER_MODE_CCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
		else
			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	} else {
		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	}
}

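/*
 * spu_chunk_cleanup() - Free the scatterlists allocated for the previous
 * mailbox message of a request.
 */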
static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
{
	struct brcm_message *mssg = &rctx->mb_mssg;

	kfree(mssg->spu.src);
	kfree(mssg->spu.dst);
	memset(mssg, 0, sizeof(struct brcm_message));
}

static void finish_req(struct iproc_reqctx_s *rctx, int err)
{
	struct crypto_async_request *areq = rctx->parent;

	flow_log("%s() err:%d\n\n", __func__, err);

	spu_chunk_cleanup(rctx);

	if (areq)
		areq->complete(areq, err);
}

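/*
 * spu_rx_callback() - Mailbox callback invoked when the SPU responds to a
 * request. Checks the response status, processes the response, and either
 * submits the next chunk of the request or completes it.
 */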
static void spu_rx_callback(struct mbox_client *cl, void *msg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct brcm_message *mssg = msg;
	struct iproc_reqctx_s *rctx;
	struct iproc_ctx_s *ctx;
	struct crypto_async_request *areq;
	int err = 0;

	rctx = mssg->ctx;
	if (unlikely(!rctx)) {
		/* This is fatal */
		pr_err("%s(): no request context", __func__);
		err = -EFAULT;
		goto cb_finish;
	}
	areq = rctx->parent;
	ctx = rctx->ctx;

	/* process the SPU status */
	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
	if (err != 0) {
		if (err == SPU_INVALID_ICV)
			atomic_inc(&iproc_priv.bad_icv);
		err = -EBADMSG;
		goto cb_finish;
	}

	/* Process the SPU response message */
	switch (rctx->ctx->alg->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		handle_ablkcipher_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		handle_ahash_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		handle_aead_resp(rctx);
		break;
	default:
		err = -EINVAL;
		goto cb_finish;
	}

	/*
	 * If this response does not complete the request, then send the next
	 * request chunk.
	 */
	if (rctx->total_sent < rctx->total_todo) {
		/* Deallocate anything specific to the previous chunk */
		spu_chunk_cleanup(rctx);

		switch (rctx->ctx->alg->type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = handle_ablkcipher_req(rctx);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = handle_ahash_req(rctx);
			if (err == -EAGAIN)
				/*
				 * Data was stashed in the hash carry; report
				 * success to the crypto API.
				 */
				err = 0;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = handle_aead_req(rctx);
			break;
		default:
			err = -EINVAL;
		}

		if (err == -EINPROGRESS)
			/* Successfully submitted request for next chunk */
			return;
	}

cb_finish:
	finish_req(rctx, err);
}

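/*
 * ablkcipher_enqueue() - Set up the request context for a block cipher
 * request and submit its first chunk to a SPU channel.
 *
 * Return: -EINPROGRESS if the request was submitted, negative errno on failure
 */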
static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
{
	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
	struct iproc_ctx_s *ctx =
	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	int err;

	flow_log("%s() enc:%u\n", __func__, encrypt);

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->is_encrypt = encrypt;
	rctx->bd_suppress = false;
	rctx->total_todo = req->nbytes;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;
	rctx->ctx = ctx;

	rctx->src_sg = req->src;
	rctx->src_nents = 0;
	rctx->src_skip = 0;
	rctx->dst_sg = req->dst;
	rctx->dst_nents = 0;
	rctx->dst_skip = 0;

	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
	    ctx->cipher.mode == CIPHER_MODE_CTR ||
	    ctx->cipher.mode == CIPHER_MODE_OFB ||
	    ctx->cipher.mode == CIPHER_MODE_XTS ||
	    ctx->cipher.mode == CIPHER_MODE_GCM ||
	    ctx->cipher.mode == CIPHER_MODE_CCM) {
		rctx->iv_ctr_len =
		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
	} else {
		rctx->iv_ctr_len = 0;
	}

	rctx->chan_idx = select_channel();
	err = handle_ablkcipher_req(rctx);
	if (err != -EINPROGRESS)
		spu_chunk_cleanup(rctx);

	return err;
}

static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen == DES_KEY_SIZE) {
		if (des_ekey(tmp, key) == 0) {
			if (crypto_ablkcipher_get_flags(cipher) &
			    CRYPTO_TFM_REQ_WEAK_KEY) {
				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;

				crypto_ablkcipher_set_flags(cipher, flags);
				return -EINVAL;
			}
		}

		ctx->cipher_type = CIPHER_TYPE_DES;
	} else {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return 0;
}

static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);

	if (keylen == (DES_KEY_SIZE * 3)) {
		const u32 *K = (const u32 *)key;
		u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;

		if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
		    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
			crypto_ablkcipher_set_flags(cipher, flags);
			return -EINVAL;
		}

		ctx->cipher_type = CIPHER_TYPE_3DES;
	} else {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return 0;
}

static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);

	if (ctx->cipher.mode == CIPHER_MODE_XTS)
		keylen = keylen / 2;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->cipher_type = CIPHER_TYPE_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->cipher_type = CIPHER_TYPE_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->cipher_type = CIPHER_TYPE_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
	return 0;
}

static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
	int i;

	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;

	ctx->enckey[0] = 0x00;
	ctx->enckey[1] = 0x00;
	ctx->enckey[2] = 0x00;
	ctx->enckey[3] = 0x00;
	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];

	ctx->cipher_type = CIPHER_TYPE_INIT;

	return 0;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
	struct spu_cipher_parms cipher_parms;
	u32 alloc_len = 0;
	int err;

	flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
	flow_dump(" key: ", key, keylen);

	switch (ctx->cipher.alg) {
	case CIPHER_ALG_DES:
		err = des_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_3DES:
		err = threedes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_AES:
		err = aes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_RC4:
		err = rc4_setkey(cipher, key, keylen);
		break;
	default:
		pr_err("%s() Error: unknown cipher alg\n", __func__);
		err = -EINVAL;
	}
	if (err)
		return err;

	/* RC4 setkey already populated ctx->enckey */
	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
		memcpy(ctx->enckey, key, keylen);
		ctx->enckeylen = keylen;
	}
	/* The SPU expects XTS keys in the reverse of the crypto API order */
	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
		unsigned int xts_keylen = keylen / 2;

		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
	}

	if (spu->spu_type == SPU_TYPE_SPUM)
		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
	cipher_parms.iv_buf = NULL;
	cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;

	/* Prepend SPU request message with BCM header */
	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
	ctx->spu_req_hdr_len =
		spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
					 &cipher_parms);

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);

	return 0;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);

	return ablkcipher_enqueue(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
	return ablkcipher_enqueue(req, false);
}

static int ahash_enqueue(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int err = 0;
	const char *alg_name;

	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->ctx = ctx;
	rctx->bd_suppress = true;
	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));

	rctx->src_sg = req->src;
	rctx->src_skip = 0;
	rctx->src_nents = 0;
	rctx->dst_sg = NULL;
	rctx->dst_skip = 0;
	rctx->dst_nents = 0;

	/* SPU2 hardware does not hash zero-length data; do it in software */
	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		flow_log("Doing %sfinal %s zero-len hash request in software\n",
			 rctx->is_final ? "" : "non-", alg_name);
		err = do_shash((unsigned char *)alg_name, req->result,
			       NULL, 0, NULL, 0, ctx->authkey,
			       ctx->authkeylen);
		if (err < 0)
			flow_log("Hash request failed with error %d\n", err);
		return err;
	}

	rctx->chan_idx = select_channel();

	err = handle_ahash_req(rctx);
	if (err != -EINPROGRESS)
		spu_chunk_cleanup(rctx);

	if (err == -EAGAIN)
		/*
		 * Data was saved in the hash carry; report success to the
		 * crypto API.
		 */
		err = 0;

	return err;
}

static int __ahash_init(struct ahash_request *req)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);

	flow_log("%s()\n", __func__);

	rctx->hash_carry_len = 0;
	rctx->is_final = 0;

	rctx->total_todo = 0;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;

	ctx->digestsize = crypto_ahash_digestsize(tfm);

	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);

	rctx->is_sw_hmac = false;

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
							  true);

	return 0;
}

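/*
 * spu_no_incr_hash() - Return true if the SPU cannot hash this request
 * incrementally (SPU2 hardware, or AES-XCBC mode), in which case callers
 * fall back to a synchronous software shash.
 */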
bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
{
	struct spu_hw *spu = &iproc_priv.spu;

	if (spu->spu_type == SPU_TYPE_SPU2)
		return true;

	if ((ctx->auth.alg == HASH_ALG_AES) &&
	    (ctx->auth.mode == HASH_MODE_XCBC))
		return true;

	return false;
}

static int ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	const char *alg_name;
	struct crypto_shash *hash;
	int ret;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		hash = crypto_alloc_shash(alg_name, 0, 0);
		if (IS_ERR(hash)) {
			ret = PTR_ERR(hash);
			goto err;
		}

		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
			CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		ctx->shash = kmalloc(sizeof(*ctx->shash) +
				     crypto_shash_descsize(hash), gfp);
		if (!ctx->shash) {
			ret = -ENOMEM;
			goto err_hash;
		}
		ctx->shash->tfm = hash;
		ctx->shash->flags = 0;

		if (ctx->authkeylen > 0) {
			ret = crypto_shash_setkey(hash, ctx->authkey,
						  ctx->authkeylen);
			if (ret)
				goto err_shash;
		}

		ret = crypto_shash_init(ctx->shash);
		if (ret)
			goto err_shash;
	} else {
		ret = __ahash_init(req);
	}

	return ret;

err_shash:
	kfree(ctx->shash);
err_hash:
	crypto_free_shash(hash);
err:
	return ret;
}

static int __ahash_update(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_update() nbytes:%u\n", req->nbytes);

	if (!req->nbytes)
		return 0;
	rctx->total_todo += req->nbytes;
	rctx->src_sent = 0;

	return ahash_enqueue(req);
}

static int ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		if (req->src)
			nents = sg_nents(req->src);
		else
			return -EINVAL;

		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
			CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf)
			return -ENOMEM;

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			kfree(tmpbuf);
			return -EINVAL;
		}

		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
		kfree(tmpbuf);
	} else {
		ret = __ahash_update(req);
	}

	return ret;
}

static int __ahash_final(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_final() nbytes:%u\n", req->nbytes);

	rctx->is_final = 1;

	return ahash_enqueue(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int ret;

	if (spu_no_incr_hash(ctx)) {
		ret = crypto_shash_final(ctx->shash, req->result);

		crypto_free_shash(ctx->shash->tfm);
		kfree(ctx->shash);
	} else {
		ret = __ahash_final(req);
	}

	return ret;
}

static int __ahash_finup(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);

	rctx->total_todo += req->nbytes;
	rctx->src_sent = 0;
	rctx->is_final = 1;

	return ahash_enqueue(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		if (req->src) {
			nents = sg_nents(req->src);
		} else {
			ret = -EINVAL;
			goto ahash_finup_exit;
		}

		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
			CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf) {
			ret = -ENOMEM;
			goto ahash_finup_exit;
		}

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			ret = -EINVAL;
			goto ahash_finup_free;
		}

		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
					 req->result);
	} else {
		return __ahash_finup(req);
	}
ahash_finup_free:
	kfree(tmpbuf);

ahash_finup_exit:
	crypto_free_shash(ctx->shash->tfm);
	kfree(ctx->shash);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	int err = 0;

	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);

	err = __ahash_init(req);
	if (!err)
		err = __ahash_finup(req);

	return err;
}

static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);

	flow_log("%s() ahash:%p key:%p keylen:%u\n",
		 __func__, ahash, key, keylen);
	flow_dump(" key: ", key, keylen);

	if (ctx->auth.alg == HASH_ALG_AES) {
		switch (keylen) {
		case AES_KEYSIZE_128:
			ctx->cipher_type = CIPHER_TYPE_AES128;
			break;
		case AES_KEYSIZE_192:
			ctx->cipher_type = CIPHER_TYPE_AES192;
			break;
		case AES_KEYSIZE_256:
			ctx->cipher_type = CIPHER_TYPE_AES256;
			break;
		default:
			pr_err("%s() Error: Invalid key length\n", __func__);
			return -EINVAL;
		}
	} else {
		pr_err("%s() Error: unknown hash alg\n", __func__);
		return -EINVAL;
	}
	memcpy(ctx->authkey, key, keylen);
	ctx->authkeylen = keylen;

	return 0;
}

static int ahash_export(struct ahash_request *req, void *out)
{
	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;

	spu_exp->total_todo = rctx->total_todo;
	spu_exp->total_sent = rctx->total_sent;
	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
	spu_exp->hash_carry_len = rctx->hash_carry_len;
	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;

	rctx->total_todo = spu_exp->total_todo;
	rctx->total_sent = spu_exp->total_sent;
	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
	rctx->hash_carry_len = spu_exp->hash_carry_len;
	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));

	return 0;
}
2414
2415static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2416 unsigned int keylen)
2417{
2418 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2419 unsigned int blocksize =
2420 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2421 unsigned int digestsize = crypto_ahash_digestsize(ahash);
2422 unsigned int index;
2423 int rc;
2424
2425 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2426 __func__, ahash, key, keylen, blocksize, digestsize);
2427 flow_dump(" key: ", key, keylen);
2428
2429 if (keylen > blocksize) {
2430 switch (ctx->auth.alg) {
2431 case HASH_ALG_MD5:
2432 rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2433 0, NULL, 0);
2434 break;
2435 case HASH_ALG_SHA1:
2436 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2437 0, NULL, 0);
2438 break;
2439 case HASH_ALG_SHA224:
2440 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2441 0, NULL, 0);
2442 break;
2443 case HASH_ALG_SHA256:
2444 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2445 0, NULL, 0);
2446 break;
2447 case HASH_ALG_SHA384:
2448 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2449 0, NULL, 0);
2450 break;
2451 case HASH_ALG_SHA512:
2452 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2453 0, NULL, 0);
2454 break;
2455 case HASH_ALG_SHA3_224:
2456 rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2457 NULL, 0, NULL, 0);
2458 break;
2459 case HASH_ALG_SHA3_256:
2460 rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2461 NULL, 0, NULL, 0);
2462 break;
2463 case HASH_ALG_SHA3_384:
2464 rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2465 NULL, 0, NULL, 0);
2466 break;
2467 case HASH_ALG_SHA3_512:
2468 rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2469 NULL, 0, NULL, 0);
2470 break;
2471 default:
2472 pr_err("%s() Error: unknown hash alg\n", __func__);
2473 return -EINVAL;
2474 }
2475 if (rc < 0) {
2476 pr_err("%s() Error %d computing shash for %s\n",
2477 __func__, rc, hash_alg_name[ctx->auth.alg]);
2478 return rc;
2479 }
2480 ctx->authkeylen = digestsize;
2481
2482 flow_log(" keylen > digestsize... hashed\n");
2483 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2484 } else {
2485 memcpy(ctx->authkey, key, keylen);
2486 ctx->authkeylen = keylen;
2487 }
2488
2489
2490
2491
2492
2493
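	/*
	 * For SPU-M hardware, generate the HMAC inner and outer pads in
	 * software: fold the (possibly hashed) key into ipad/opad so that the
	 * hardware only has to perform plain hash operations.
	 */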
2494 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2495 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2496 memset(ctx->ipad + ctx->authkeylen, 0,
2497 blocksize - ctx->authkeylen);
2498 ctx->authkeylen = 0;
2499 memcpy(ctx->opad, ctx->ipad, blocksize);
2500
2501 for (index = 0; index < blocksize; index++) {
2502 ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2503 ctx->opad[index] ^= HMAC_OPAD_VALUE;
2504 }
2505
2506 flow_dump(" ipad: ", ctx->ipad, blocksize);
2507 flow_dump(" opad: ", ctx->opad, blocksize);
2508 }
2509 ctx->digestsize = digestsize;
2510 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2511
2512 return 0;
2513}
2514
2515static int ahash_hmac_init(struct ahash_request *req)
2516{
2517 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2518 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2519 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2520 unsigned int blocksize =
2521 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2522
2523 flow_log("ahash_hmac_init()\n");
2524
2525
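	/* init the context as a plain hash */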
2526 ahash_init(req);
2527
2528 if (!spu_no_incr_hash(ctx)) {
2529
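		/* SPU-M can do incremental hashing but needs software for the outer HMAC */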
2530 rctx->is_sw_hmac = true;
2531 ctx->auth.mode = HASH_MODE_HASH;
2532
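		/* start with a prepended ipad */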
2533 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2534 rctx->hash_carry_len = blocksize;
2535 rctx->total_todo += blocksize;
2536 }
2537
2538 return 0;
2539}
2540
2541static int ahash_hmac_update(struct ahash_request *req)
2542{
2543 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2544
2545 if (!req->nbytes)
2546 return 0;
2547
2548 return ahash_update(req);
2549}
2550
2551static int ahash_hmac_final(struct ahash_request *req)
2552{
2553 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2554
2555 return ahash_final(req);
2556}
2557
2558static int ahash_hmac_finup(struct ahash_request *req)
2559{
	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2561
2562 return ahash_finup(req);
2563}
2564
2565static int ahash_hmac_digest(struct ahash_request *req)
2566{
2567 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2568 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2569 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2570 unsigned int blocksize =
2571 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2572
2573 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2574
2575
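	/* initialize the hash state; all data is then hashed by __ahash_finup() below */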
2576 __ahash_init(req);
2577
2578 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2579
2580
2581
2582
2583
2584
2585
2586
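		/*
		 * SPU2 supports HMAC fully in hardware, so there is no need to
		 * generate the inner/outer pads or do the outer hash in
		 * software.
		 */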
2587 rctx->is_sw_hmac = false;
2588 ctx->auth.mode = HASH_MODE_HMAC;
2589 } else {
2590 rctx->is_sw_hmac = true;
2591 ctx->auth.mode = HASH_MODE_HASH;
2592
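		/* start with a prepended ipad */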
2593 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2594 rctx->hash_carry_len = blocksize;
2595 rctx->total_todo += blocksize;
2596 }
2597
2598 return __ahash_finup(req);
2599}
2600
2601
2602
2603static int aead_need_fallback(struct aead_request *req)
2604{
2605 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2606 struct spu_hw *spu = &iproc_priv.spu;
2607 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2608 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2609 u32 payload_len;
2610
2611
2612
2613
2614
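	/*
	 * SPU hardware cannot handle the AES-GCM/CCM case where both the
	 * plaintext and the AAD are zero bytes long, so use the fallback for
	 * that case.
	 */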
2615 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2616 (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2617 (req->assoclen == 0)) {
2618 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2619 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2620 flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2621 return 1;
2622 }
2623 }
2624
2625
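	/* SPU-M hardware only supports CCM digest sizes of 8, 12 or 16 bytes */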
2626 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2627 (spu->spu_type == SPU_TYPE_SPUM) &&
2628 (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2629 (ctx->digestsize != 16)) {
2630 flow_log("%s() AES CCM needs fallback for digest size %d\n",
2631 __func__, ctx->digestsize);
2632 return 1;
2633 }
2634
2635
2636
2637
2638
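	/*
	 * SPU-M on the NSP does not produce a correct AES-CCM result when the
	 * AAD is empty, so use the fallback for zero-length associated data.
	 */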
2639 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2640 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2641 (req->assoclen == 0)) {
2642 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2643 __func__);
2644 return 1;
2645 }
2646
2647 payload_len = req->cryptlen;
2648 if (spu->spu_type == SPU_TYPE_SPUM)
2649 payload_len += req->assoclen;
2650
2651 flow_log("%s() payload len: %u\n", __func__, payload_len);
2652
2653 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2654 return 0;
2655 else
2656 return payload_len > ctx->max_payload;
2657}
2658
2659static void aead_complete(struct crypto_async_request *areq, int err)
2660{
2661 struct aead_request *req =
2662 container_of(areq, struct aead_request, base);
2663 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2664 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2665
2666 flow_log("%s() err:%d\n", __func__, err);
2667
2668 areq->tfm = crypto_aead_tfm(aead);
2669
2670 areq->complete = rctx->old_complete;
2671 areq->data = rctx->old_data;
2672
2673 areq->complete(areq, err);
2674}
2675
2676static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2677{
2678 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2679 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2680 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2681 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2682 int err;
2683 u32 req_flags;
2684
2685 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2686
2687 if (ctx->fallback_cipher) {
2688
2689 rctx->old_tfm = tfm;
2690 aead_request_set_tfm(req, ctx->fallback_cipher);
2691
2692
2693
2694
2695 rctx->old_complete = req->base.complete;
2696 rctx->old_data = req->base.data;
2697 req_flags = aead_request_flags(req);
2698 aead_request_set_callback(req, req_flags, aead_complete, req);
2699 err = is_encrypt ? crypto_aead_encrypt(req) :
2700 crypto_aead_decrypt(req);
2701
2702 if (err == 0) {
2703
2704
2705
2706
2707 aead_request_set_callback(req, req_flags,
2708 rctx->old_complete, req);
2709 req->base.data = rctx->old_data;
2710 aead_request_set_tfm(req, aead);
2711 flow_log("%s() fallback completed successfully\n\n",
2712 __func__);
2713 }
2714 } else {
2715 err = -EINVAL;
2716 }
2717
2718 return err;
2719}
2720
2721static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2722{
2723 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2724 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2725 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2726 int err;
2727
2728 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2729
2730 if (req->assoclen > MAX_ASSOC_SIZE) {
		pr_err("%s() Error: associated data too long. (%u > %u bytes)\n",
		       __func__, req->assoclen, MAX_ASSOC_SIZE);
2734 return -EINVAL;
2735 }
2736
2737 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2738 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2739 rctx->parent = &req->base;
2740 rctx->is_encrypt = is_encrypt;
2741 rctx->bd_suppress = false;
2742 rctx->total_todo = req->cryptlen;
2743 rctx->src_sent = 0;
2744 rctx->total_sent = 0;
2745 rctx->total_received = 0;
2746 rctx->is_sw_hmac = false;
2747 rctx->ctx = ctx;
2748 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2749
2750
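	/* assoc data is at the start of the src scatterlist */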
2751 rctx->assoc = req->src;
2752
2753
2754
2755
2756
2757
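	/*
	 * Init current position in the src scatterlist to be just after the
	 * assoc data. src_skip is the buffer offset where the data begins in
	 * the first sg entry.
	 */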
2758 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2759 &rctx->src_skip) < 0) {
2760 pr_err("%s() Error: Unable to find start of src data\n",
2761 __func__);
2762 return -EINVAL;
2763 }
2764
2765 rctx->src_nents = 0;
2766 rctx->dst_nents = 0;
2767 if (req->dst == req->src) {
2768 rctx->dst_sg = rctx->src_sg;
2769 rctx->dst_skip = rctx->src_skip;
2770 } else {
2771
2772
2773
2774
2775
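		/*
		 * req->dst is expected to hold assoc data followed by output
		 * data (and the ICV on encrypt), so initialize dst_sg to point
		 * just past the assoc data as well.
		 */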
2776 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2777 &rctx->dst_skip) < 0) {
2778 pr_err("%s() Error: Unable to find start of dst data\n",
2779 __func__);
2780 return -EINVAL;
2781 }
2782 }
2783
2784 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2785 ctx->cipher.mode == CIPHER_MODE_CTR ||
2786 ctx->cipher.mode == CIPHER_MODE_OFB ||
2787 ctx->cipher.mode == CIPHER_MODE_XTS ||
2788 ctx->cipher.mode == CIPHER_MODE_GCM) {
2789 rctx->iv_ctr_len =
2790 ctx->salt_len +
2791 crypto_aead_ivsize(crypto_aead_reqtfm(req));
2792 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2793 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2794 } else {
2795 rctx->iv_ctr_len = 0;
2796 }
2797
2798 rctx->hash_carry_len = 0;
2799
2800 flow_log(" src sg: %p\n", req->src);
2801 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2802 rctx->src_sg, rctx->src_skip);
2803 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2804 flow_log(" dst sg: %p\n", req->dst);
2805 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2806 rctx->dst_sg, rctx->dst_skip);
2807 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2808 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2809 flow_log(" authkeylen:%u\n", ctx->authkeylen);
2810 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2811
2812 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2813 flow_log(" max_payload infinite");
2814 else
2815 flow_log(" max_payload: %u\n", ctx->max_payload);
2816
2817 if (unlikely(aead_need_fallback(req)))
2818 return aead_do_fallback(req, is_encrypt);
2819
2820
2821
2822
2823
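	/*
	 * Copy the salt and IV into the message buffer only after the fallback
	 * check; a request handled by the fallback never uses this buffer.
	 */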
2824 if (rctx->iv_ctr_len) {
2825 if (ctx->salt_len)
2826 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2827 ctx->salt, ctx->salt_len);
2828 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2829 req->iv,
2830 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2831 }
2832
2833 rctx->chan_idx = select_channel();
2834 err = handle_aead_req(rctx);
2835 if (err != -EINPROGRESS)
2836
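		/* synchronous result; clean up the chunk resources here */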
2837 spu_chunk_cleanup(rctx);
2838
2839 return err;
2840}
2841
2842static int aead_authenc_setkey(struct crypto_aead *cipher,
2843 const u8 *key, unsigned int keylen)
2844{
2845 struct spu_hw *spu = &iproc_priv.spu;
2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2848 struct rtattr *rta = (void *)key;
2849 struct crypto_authenc_key_param *param;
2850 const u8 *origkey = key;
2851 const unsigned int origkeylen = keylen;
2852
2853 int ret = 0;
2854
2855 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2856 keylen);
2857 flow_dump(" key: ", key, keylen);
2858
2859 if (!RTA_OK(rta, keylen))
2860 goto badkey;
2861 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
2862 goto badkey;
2863 if (RTA_PAYLOAD(rta) < sizeof(*param))
2864 goto badkey;
2865
2866 param = RTA_DATA(rta);
2867 ctx->enckeylen = be32_to_cpu(param->enckeylen);
2868
2869 key += RTA_ALIGN(rta->rta_len);
2870 keylen -= RTA_ALIGN(rta->rta_len);
2871
2872 if (keylen < ctx->enckeylen)
2873 goto badkey;
2874 if (ctx->enckeylen > MAX_KEY_SIZE)
2875 goto badkey;
2876
2877 ctx->authkeylen = keylen - ctx->enckeylen;
2878
2879 if (ctx->authkeylen > MAX_KEY_SIZE)
2880 goto badkey;
2881
2882 memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
2883
2884 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2885 memcpy(ctx->authkey, key, ctx->authkeylen);
2886
2887 switch (ctx->alg->cipher_info.alg) {
2888 case CIPHER_ALG_DES:
2889 if (ctx->enckeylen == DES_KEY_SIZE) {
2890 u32 tmp[DES_EXPKEY_WORDS];
2891 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2892
			/* run the weak-key check on the DES encryption key */
			if (des_ekey(tmp, ctx->enckey) == 0) {
2894 if (crypto_aead_get_flags(cipher) &
2895 CRYPTO_TFM_REQ_WEAK_KEY) {
2896 crypto_aead_set_flags(cipher, flags);
2897 return -EINVAL;
2898 }
2899 }
2900
2901 ctx->cipher_type = CIPHER_TYPE_DES;
2902 } else {
2903 goto badkey;
2904 }
2905 break;
2906 case CIPHER_ALG_3DES:
2907 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
			/* check the 3DES encryption key for degenerate halves */
			const u32 *K = (const u32 *)ctx->enckey;
2909 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2910
2911 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
2912 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
2913 crypto_aead_set_flags(cipher, flags);
2914 return -EINVAL;
2915 }
2916
2917 ctx->cipher_type = CIPHER_TYPE_3DES;
2918 } else {
2919 crypto_aead_set_flags(cipher,
2920 CRYPTO_TFM_RES_BAD_KEY_LEN);
2921 return -EINVAL;
2922 }
2923 break;
2924 case CIPHER_ALG_AES:
2925 switch (ctx->enckeylen) {
2926 case AES_KEYSIZE_128:
2927 ctx->cipher_type = CIPHER_TYPE_AES128;
2928 break;
2929 case AES_KEYSIZE_192:
2930 ctx->cipher_type = CIPHER_TYPE_AES192;
2931 break;
2932 case AES_KEYSIZE_256:
2933 ctx->cipher_type = CIPHER_TYPE_AES256;
2934 break;
2935 default:
2936 goto badkey;
2937 }
2938 break;
2939 case CIPHER_ALG_RC4:
2940 ctx->cipher_type = CIPHER_TYPE_INIT;
2941 break;
2942 default:
2943 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2944 return -EINVAL;
2945 }
2946
2947 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2948 ctx->authkeylen);
2949 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2950 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2951
2952
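	/* setkey the fallback cipher as well, in case we need to use it */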
2953 if (ctx->fallback_cipher) {
2954 flow_log(" running fallback setkey()\n");
2955
2956 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2957 ctx->fallback_cipher->base.crt_flags |=
2958 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
		ret = crypto_aead_setkey(ctx->fallback_cipher, origkey,
					 origkeylen);
2962 if (ret) {
2963 flow_log(" fallback setkey() returned:%d\n", ret);
2964 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2965 tfm->crt_flags |=
2966 (ctx->fallback_cipher->base.crt_flags &
2967 CRYPTO_TFM_RES_MASK);
2968 }
2969 }
2970
2971 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2972 ctx->enckeylen,
2973 false);
2974
2975 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2976
2977 return ret;
2978
2979badkey:
2980 ctx->enckeylen = 0;
2981 ctx->authkeylen = 0;
2982 ctx->digestsize = 0;
2983
2984 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2985 return -EINVAL;
2986}
2987
2988static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2989 const u8 *key, unsigned int keylen)
2990{
2991 struct spu_hw *spu = &iproc_priv.spu;
2992 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2993 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2994
2995 int ret = 0;
2996
2997 flow_log("%s() keylen:%u\n", __func__, keylen);
2998 flow_dump(" key: ", key, keylen);
2999
3000 if (!ctx->is_esp)
3001 ctx->digestsize = keylen;
3002
3003 ctx->enckeylen = keylen;
3004 ctx->authkeylen = 0;
3005 memcpy(ctx->enckey, key, ctx->enckeylen);
3006
3007 switch (ctx->enckeylen) {
3008 case AES_KEYSIZE_128:
3009 ctx->cipher_type = CIPHER_TYPE_AES128;
3010 break;
3011 case AES_KEYSIZE_192:
3012 ctx->cipher_type = CIPHER_TYPE_AES192;
3013 break;
3014 case AES_KEYSIZE_256:
3015 ctx->cipher_type = CIPHER_TYPE_AES256;
3016 break;
3017 default:
3018 goto badkey;
3019 }
3020
3021 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3022 ctx->authkeylen);
3023 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
3024 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
3025
3026
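	/* setkey the fallback cipher as well, in case we need to use it */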
3027 if (ctx->fallback_cipher) {
3028 flow_log(" running fallback setkey()\n");
3029
3030 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
3031 ctx->fallback_cipher->base.crt_flags |=
3032 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
3033 ret = crypto_aead_setkey(ctx->fallback_cipher, key,
3034 keylen + ctx->salt_len);
3035 if (ret) {
3036 flow_log(" fallback setkey() returned:%d\n", ret);
3037 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
3038 tfm->crt_flags |=
3039 (ctx->fallback_cipher->base.crt_flags &
3040 CRYPTO_TFM_RES_MASK);
3041 }
3042 }
3043
3044 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
3045 ctx->enckeylen,
3046 false);
3047
3048 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
3049
3050 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3051 ctx->authkeylen);
3052
3053 return ret;
3054
3055badkey:
3056 ctx->enckeylen = 0;
3057 ctx->authkeylen = 0;
3058 ctx->digestsize = 0;
3059
3060 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
3061 return -EINVAL;
3062}
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
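/**
 * aead_gcm_esp_setkey() - setkey() operation for the ESP variant of GCM AES.
 * @cipher: AEAD structure
 * @key:    Key followed by 4 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts the salt from the end of the key and stores it so it can be
 * prepended to the IV on each request. Digest size is fixed at 16 bytes.
 *
 * Return: Value from the generic GCM/CCM setkey.
 */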
3075static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3076 const u8 *key, unsigned int keylen)
3077{
3078 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3079
3080 flow_log("%s\n", __func__);
3081 ctx->salt_len = GCM_ESP_SALT_SIZE;
3082 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3083 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3084 keylen -= GCM_ESP_SALT_SIZE;
3085 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3086 ctx->is_esp = true;
3087 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3088
3089 return aead_gcm_ccm_setkey(cipher, key, keylen);
3090}
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
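/**
 * rfc4543_gcm_esp_setkey() - setkey() operation for the RFC4543 GMAC variant.
 * @cipher: AEAD structure
 * @key:    Key followed by 4 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts the salt from the end of the key and stores it so it can be
 * prepended to the IV on each request. Digest size is fixed at 16 bytes.
 *
 * Return: Value from the generic GCM/CCM setkey.
 */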
3103static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3104 const u8 *key, unsigned int keylen)
3105{
3106 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3107
3108 flow_log("%s\n", __func__);
3109 ctx->salt_len = GCM_ESP_SALT_SIZE;
3110 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3111 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3112 keylen -= GCM_ESP_SALT_SIZE;
3113 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3114 ctx->is_esp = true;
3115 ctx->is_rfc4543 = true;
3116 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3117
3118 return aead_gcm_ccm_setkey(cipher, key, keylen);
3119}
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
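/**
 * aead_ccm_esp_setkey() - setkey() operation for the ESP variant of CCM AES.
 * @cipher: AEAD structure
 * @key:    Key followed by 3 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts the salt from the end of the key and stores it so it can be
 * prepended to the IV on each request.
 *
 * Return: Value from the generic GCM/CCM setkey.
 */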
3132static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3133 const u8 *key, unsigned int keylen)
3134{
3135 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3136
3137 flow_log("%s\n", __func__);
3138 ctx->salt_len = CCM_ESP_SALT_SIZE;
3139 ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3140 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3141 keylen -= CCM_ESP_SALT_SIZE;
3142 ctx->is_esp = true;
3143 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3144
3145 return aead_gcm_ccm_setkey(cipher, key, keylen);
3146}
3147
3148static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3149{
3150 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3151 int ret = 0;
3152
3153 flow_log("%s() authkeylen:%u authsize:%u\n",
3154 __func__, ctx->authkeylen, authsize);
3155
3156 ctx->digestsize = authsize;
3157
3158
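	/* propagate the authsize to the fallback cipher in case it is used */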
3159 if (ctx->fallback_cipher) {
3160 flow_log(" running fallback setauth()\n");
3161
3162 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3163 if (ret)
3164 flow_log(" fallback setauth() returned:%d\n", ret);
3165 }
3166
3167 return ret;
3168}
3169
3170static int aead_encrypt(struct aead_request *req)
3171{
	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3174 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3175 flow_log(" assoc_len:%u\n", req->assoclen);
3176
3177 return aead_enqueue(req, true);
3178}
3179
3180static int aead_decrypt(struct aead_request *req)
3181{
3182 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3183 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3184 flow_log(" assoc_len:%u\n", req->assoclen);
3185
3186 return aead_enqueue(req, false);
3187}
3188
3189
3190
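/* Table of algorithms supported by this driver and registered with the crypto API */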
3191static struct iproc_alg_s driver_algs[] = {
3192 {
3193 .type = CRYPTO_ALG_TYPE_AEAD,
3194 .alg.aead = {
3195 .base = {
3196 .cra_name = "gcm(aes)",
3197 .cra_driver_name = "gcm-aes-iproc",
3198 .cra_blocksize = AES_BLOCK_SIZE,
3199 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3200 },
3201 .setkey = aead_gcm_ccm_setkey,
3202 .ivsize = GCM_AES_IV_SIZE,
3203 .maxauthsize = AES_BLOCK_SIZE,
3204 },
3205 .cipher_info = {
3206 .alg = CIPHER_ALG_AES,
3207 .mode = CIPHER_MODE_GCM,
3208 },
3209 .auth_info = {
3210 .alg = HASH_ALG_AES,
3211 .mode = HASH_MODE_GCM,
3212 },
3213 .auth_first = 0,
3214 },
3215 {
3216 .type = CRYPTO_ALG_TYPE_AEAD,
3217 .alg.aead = {
3218 .base = {
3219 .cra_name = "ccm(aes)",
3220 .cra_driver_name = "ccm-aes-iproc",
3221 .cra_blocksize = AES_BLOCK_SIZE,
3222 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3223 },
3224 .setkey = aead_gcm_ccm_setkey,
3225 .ivsize = CCM_AES_IV_SIZE,
3226 .maxauthsize = AES_BLOCK_SIZE,
3227 },
3228 .cipher_info = {
3229 .alg = CIPHER_ALG_AES,
3230 .mode = CIPHER_MODE_CCM,
3231 },
3232 .auth_info = {
3233 .alg = HASH_ALG_AES,
3234 .mode = HASH_MODE_CCM,
3235 },
3236 .auth_first = 0,
3237 },
3238 {
3239 .type = CRYPTO_ALG_TYPE_AEAD,
3240 .alg.aead = {
3241 .base = {
3242 .cra_name = "rfc4106(gcm(aes))",
3243 .cra_driver_name = "gcm-aes-esp-iproc",
3244 .cra_blocksize = AES_BLOCK_SIZE,
3245 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3246 },
3247 .setkey = aead_gcm_esp_setkey,
3248 .ivsize = GCM_RFC4106_IV_SIZE,
3249 .maxauthsize = AES_BLOCK_SIZE,
3250 },
3251 .cipher_info = {
3252 .alg = CIPHER_ALG_AES,
3253 .mode = CIPHER_MODE_GCM,
3254 },
3255 .auth_info = {
3256 .alg = HASH_ALG_AES,
3257 .mode = HASH_MODE_GCM,
3258 },
3259 .auth_first = 0,
3260 },
3261 {
3262 .type = CRYPTO_ALG_TYPE_AEAD,
3263 .alg.aead = {
3264 .base = {
3265 .cra_name = "rfc4309(ccm(aes))",
3266 .cra_driver_name = "ccm-aes-esp-iproc",
3267 .cra_blocksize = AES_BLOCK_SIZE,
3268 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3269 },
3270 .setkey = aead_ccm_esp_setkey,
3271 .ivsize = CCM_AES_IV_SIZE,
3272 .maxauthsize = AES_BLOCK_SIZE,
3273 },
3274 .cipher_info = {
3275 .alg = CIPHER_ALG_AES,
3276 .mode = CIPHER_MODE_CCM,
3277 },
3278 .auth_info = {
3279 .alg = HASH_ALG_AES,
3280 .mode = HASH_MODE_CCM,
3281 },
3282 .auth_first = 0,
3283 },
3284 {
3285 .type = CRYPTO_ALG_TYPE_AEAD,
3286 .alg.aead = {
3287 .base = {
3288 .cra_name = "rfc4543(gcm(aes))",
3289 .cra_driver_name = "gmac-aes-esp-iproc",
3290 .cra_blocksize = AES_BLOCK_SIZE,
3291 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3292 },
3293 .setkey = rfc4543_gcm_esp_setkey,
3294 .ivsize = GCM_RFC4106_IV_SIZE,
3295 .maxauthsize = AES_BLOCK_SIZE,
3296 },
3297 .cipher_info = {
3298 .alg = CIPHER_ALG_AES,
3299 .mode = CIPHER_MODE_GCM,
3300 },
3301 .auth_info = {
3302 .alg = HASH_ALG_AES,
3303 .mode = HASH_MODE_GCM,
3304 },
3305 .auth_first = 0,
3306 },
3307 {
3308 .type = CRYPTO_ALG_TYPE_AEAD,
3309 .alg.aead = {
3310 .base = {
3311 .cra_name = "authenc(hmac(md5),cbc(aes))",
3312 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3313 .cra_blocksize = AES_BLOCK_SIZE,
3314 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3315 },
3316 .setkey = aead_authenc_setkey,
3317 .ivsize = AES_BLOCK_SIZE,
3318 .maxauthsize = MD5_DIGEST_SIZE,
3319 },
3320 .cipher_info = {
3321 .alg = CIPHER_ALG_AES,
3322 .mode = CIPHER_MODE_CBC,
3323 },
3324 .auth_info = {
3325 .alg = HASH_ALG_MD5,
3326 .mode = HASH_MODE_HMAC,
3327 },
3328 .auth_first = 0,
3329 },
3330 {
3331 .type = CRYPTO_ALG_TYPE_AEAD,
3332 .alg.aead = {
3333 .base = {
3334 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3335 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3336 .cra_blocksize = AES_BLOCK_SIZE,
3337 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3338 },
3339 .setkey = aead_authenc_setkey,
3340 .ivsize = AES_BLOCK_SIZE,
3341 .maxauthsize = SHA1_DIGEST_SIZE,
3342 },
3343 .cipher_info = {
3344 .alg = CIPHER_ALG_AES,
3345 .mode = CIPHER_MODE_CBC,
3346 },
3347 .auth_info = {
3348 .alg = HASH_ALG_SHA1,
3349 .mode = HASH_MODE_HMAC,
3350 },
3351 .auth_first = 0,
3352 },
3353 {
3354 .type = CRYPTO_ALG_TYPE_AEAD,
3355 .alg.aead = {
3356 .base = {
3357 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3358 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3359 .cra_blocksize = AES_BLOCK_SIZE,
3360 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3361 },
3362 .setkey = aead_authenc_setkey,
3363 .ivsize = AES_BLOCK_SIZE,
3364 .maxauthsize = SHA256_DIGEST_SIZE,
3365 },
3366 .cipher_info = {
3367 .alg = CIPHER_ALG_AES,
3368 .mode = CIPHER_MODE_CBC,
3369 },
3370 .auth_info = {
3371 .alg = HASH_ALG_SHA256,
3372 .mode = HASH_MODE_HMAC,
3373 },
3374 .auth_first = 0,
3375 },
3376 {
3377 .type = CRYPTO_ALG_TYPE_AEAD,
3378 .alg.aead = {
3379 .base = {
3380 .cra_name = "authenc(hmac(md5),cbc(des))",
3381 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3382 .cra_blocksize = DES_BLOCK_SIZE,
3383 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3384 },
3385 .setkey = aead_authenc_setkey,
3386 .ivsize = DES_BLOCK_SIZE,
3387 .maxauthsize = MD5_DIGEST_SIZE,
3388 },
3389 .cipher_info = {
3390 .alg = CIPHER_ALG_DES,
3391 .mode = CIPHER_MODE_CBC,
3392 },
3393 .auth_info = {
3394 .alg = HASH_ALG_MD5,
3395 .mode = HASH_MODE_HMAC,
3396 },
3397 .auth_first = 0,
3398 },
3399 {
3400 .type = CRYPTO_ALG_TYPE_AEAD,
3401 .alg.aead = {
3402 .base = {
3403 .cra_name = "authenc(hmac(sha1),cbc(des))",
3404 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3405 .cra_blocksize = DES_BLOCK_SIZE,
3406 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3407 },
3408 .setkey = aead_authenc_setkey,
3409 .ivsize = DES_BLOCK_SIZE,
3410 .maxauthsize = SHA1_DIGEST_SIZE,
3411 },
3412 .cipher_info = {
3413 .alg = CIPHER_ALG_DES,
3414 .mode = CIPHER_MODE_CBC,
3415 },
3416 .auth_info = {
3417 .alg = HASH_ALG_SHA1,
3418 .mode = HASH_MODE_HMAC,
3419 },
3420 .auth_first = 0,
3421 },
3422 {
3423 .type = CRYPTO_ALG_TYPE_AEAD,
3424 .alg.aead = {
3425 .base = {
3426 .cra_name = "authenc(hmac(sha224),cbc(des))",
3427 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3428 .cra_blocksize = DES_BLOCK_SIZE,
3429 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3430 },
3431 .setkey = aead_authenc_setkey,
3432 .ivsize = DES_BLOCK_SIZE,
3433 .maxauthsize = SHA224_DIGEST_SIZE,
3434 },
3435 .cipher_info = {
3436 .alg = CIPHER_ALG_DES,
3437 .mode = CIPHER_MODE_CBC,
3438 },
3439 .auth_info = {
3440 .alg = HASH_ALG_SHA224,
3441 .mode = HASH_MODE_HMAC,
3442 },
3443 .auth_first = 0,
3444 },
3445 {
3446 .type = CRYPTO_ALG_TYPE_AEAD,
3447 .alg.aead = {
3448 .base = {
3449 .cra_name = "authenc(hmac(sha256),cbc(des))",
3450 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3451 .cra_blocksize = DES_BLOCK_SIZE,
3452 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3453 },
3454 .setkey = aead_authenc_setkey,
3455 .ivsize = DES_BLOCK_SIZE,
3456 .maxauthsize = SHA256_DIGEST_SIZE,
3457 },
3458 .cipher_info = {
3459 .alg = CIPHER_ALG_DES,
3460 .mode = CIPHER_MODE_CBC,
3461 },
3462 .auth_info = {
3463 .alg = HASH_ALG_SHA256,
3464 .mode = HASH_MODE_HMAC,
3465 },
3466 .auth_first = 0,
3467 },
3468 {
3469 .type = CRYPTO_ALG_TYPE_AEAD,
3470 .alg.aead = {
3471 .base = {
3472 .cra_name = "authenc(hmac(sha384),cbc(des))",
3473 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3474 .cra_blocksize = DES_BLOCK_SIZE,
3475 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3476 },
3477 .setkey = aead_authenc_setkey,
3478 .ivsize = DES_BLOCK_SIZE,
3479 .maxauthsize = SHA384_DIGEST_SIZE,
3480 },
3481 .cipher_info = {
3482 .alg = CIPHER_ALG_DES,
3483 .mode = CIPHER_MODE_CBC,
3484 },
3485 .auth_info = {
3486 .alg = HASH_ALG_SHA384,
3487 .mode = HASH_MODE_HMAC,
3488 },
3489 .auth_first = 0,
3490 },
3491 {
3492 .type = CRYPTO_ALG_TYPE_AEAD,
3493 .alg.aead = {
3494 .base = {
3495 .cra_name = "authenc(hmac(sha512),cbc(des))",
3496 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3497 .cra_blocksize = DES_BLOCK_SIZE,
3498 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3499 },
3500 .setkey = aead_authenc_setkey,
3501 .ivsize = DES_BLOCK_SIZE,
3502 .maxauthsize = SHA512_DIGEST_SIZE,
3503 },
3504 .cipher_info = {
3505 .alg = CIPHER_ALG_DES,
3506 .mode = CIPHER_MODE_CBC,
3507 },
3508 .auth_info = {
3509 .alg = HASH_ALG_SHA512,
3510 .mode = HASH_MODE_HMAC,
3511 },
3512 .auth_first = 0,
3513 },
3514 {
3515 .type = CRYPTO_ALG_TYPE_AEAD,
3516 .alg.aead = {
3517 .base = {
3518 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3519 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3520 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3521 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3522 },
3523 .setkey = aead_authenc_setkey,
3524 .ivsize = DES3_EDE_BLOCK_SIZE,
3525 .maxauthsize = MD5_DIGEST_SIZE,
3526 },
3527 .cipher_info = {
3528 .alg = CIPHER_ALG_3DES,
3529 .mode = CIPHER_MODE_CBC,
3530 },
3531 .auth_info = {
3532 .alg = HASH_ALG_MD5,
3533 .mode = HASH_MODE_HMAC,
3534 },
3535 .auth_first = 0,
3536 },
3537 {
3538 .type = CRYPTO_ALG_TYPE_AEAD,
3539 .alg.aead = {
3540 .base = {
3541 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3542 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3543 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3544 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3545 },
3546 .setkey = aead_authenc_setkey,
3547 .ivsize = DES3_EDE_BLOCK_SIZE,
3548 .maxauthsize = SHA1_DIGEST_SIZE,
3549 },
3550 .cipher_info = {
3551 .alg = CIPHER_ALG_3DES,
3552 .mode = CIPHER_MODE_CBC,
3553 },
3554 .auth_info = {
3555 .alg = HASH_ALG_SHA1,
3556 .mode = HASH_MODE_HMAC,
3557 },
3558 .auth_first = 0,
3559 },
3560 {
3561 .type = CRYPTO_ALG_TYPE_AEAD,
3562 .alg.aead = {
3563 .base = {
3564 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3565 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3566 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3567 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3568 },
3569 .setkey = aead_authenc_setkey,
3570 .ivsize = DES3_EDE_BLOCK_SIZE,
3571 .maxauthsize = SHA224_DIGEST_SIZE,
3572 },
3573 .cipher_info = {
3574 .alg = CIPHER_ALG_3DES,
3575 .mode = CIPHER_MODE_CBC,
3576 },
3577 .auth_info = {
3578 .alg = HASH_ALG_SHA224,
3579 .mode = HASH_MODE_HMAC,
3580 },
3581 .auth_first = 0,
3582 },
3583 {
3584 .type = CRYPTO_ALG_TYPE_AEAD,
3585 .alg.aead = {
3586 .base = {
3587 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3588 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3589 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3590 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3591 },
3592 .setkey = aead_authenc_setkey,
3593 .ivsize = DES3_EDE_BLOCK_SIZE,
3594 .maxauthsize = SHA256_DIGEST_SIZE,
3595 },
3596 .cipher_info = {
3597 .alg = CIPHER_ALG_3DES,
3598 .mode = CIPHER_MODE_CBC,
3599 },
3600 .auth_info = {
3601 .alg = HASH_ALG_SHA256,
3602 .mode = HASH_MODE_HMAC,
3603 },
3604 .auth_first = 0,
3605 },
3606 {
3607 .type = CRYPTO_ALG_TYPE_AEAD,
3608 .alg.aead = {
3609 .base = {
3610 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3611 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3612 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3613 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3614 },
3615 .setkey = aead_authenc_setkey,
3616 .ivsize = DES3_EDE_BLOCK_SIZE,
3617 .maxauthsize = SHA384_DIGEST_SIZE,
3618 },
3619 .cipher_info = {
3620 .alg = CIPHER_ALG_3DES,
3621 .mode = CIPHER_MODE_CBC,
3622 },
3623 .auth_info = {
3624 .alg = HASH_ALG_SHA384,
3625 .mode = HASH_MODE_HMAC,
3626 },
3627 .auth_first = 0,
3628 },
3629 {
3630 .type = CRYPTO_ALG_TYPE_AEAD,
3631 .alg.aead = {
3632 .base = {
3633 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3634 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3635 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3636 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3637 },
3638 .setkey = aead_authenc_setkey,
3639 .ivsize = DES3_EDE_BLOCK_SIZE,
3640 .maxauthsize = SHA512_DIGEST_SIZE,
3641 },
3642 .cipher_info = {
3643 .alg = CIPHER_ALG_3DES,
3644 .mode = CIPHER_MODE_CBC,
3645 },
3646 .auth_info = {
3647 .alg = HASH_ALG_SHA512,
3648 .mode = HASH_MODE_HMAC,
3649 },
3650 .auth_first = 0,
3651 },
3652
3653
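	/* ABLKCIPHER algorithms */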
3654 {
3655 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3656 .alg.crypto = {
3657 .cra_name = "ecb(arc4)",
3658 .cra_driver_name = "ecb-arc4-iproc",
3659 .cra_blocksize = ARC4_BLOCK_SIZE,
3660 .cra_ablkcipher = {
3661 .min_keysize = ARC4_MIN_KEY_SIZE,
3662 .max_keysize = ARC4_MAX_KEY_SIZE,
3663 .ivsize = 0,
3664 }
3665 },
3666 .cipher_info = {
3667 .alg = CIPHER_ALG_RC4,
3668 .mode = CIPHER_MODE_NONE,
3669 },
3670 .auth_info = {
3671 .alg = HASH_ALG_NONE,
3672 .mode = HASH_MODE_NONE,
3673 },
3674 },
3675 {
3676 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3677 .alg.crypto = {
3678 .cra_name = "ofb(des)",
3679 .cra_driver_name = "ofb-des-iproc",
3680 .cra_blocksize = DES_BLOCK_SIZE,
3681 .cra_ablkcipher = {
3682 .min_keysize = DES_KEY_SIZE,
3683 .max_keysize = DES_KEY_SIZE,
3684 .ivsize = DES_BLOCK_SIZE,
3685 }
3686 },
3687 .cipher_info = {
3688 .alg = CIPHER_ALG_DES,
3689 .mode = CIPHER_MODE_OFB,
3690 },
3691 .auth_info = {
3692 .alg = HASH_ALG_NONE,
3693 .mode = HASH_MODE_NONE,
3694 },
3695 },
3696 {
3697 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3698 .alg.crypto = {
3699 .cra_name = "cbc(des)",
3700 .cra_driver_name = "cbc-des-iproc",
3701 .cra_blocksize = DES_BLOCK_SIZE,
3702 .cra_ablkcipher = {
3703 .min_keysize = DES_KEY_SIZE,
3704 .max_keysize = DES_KEY_SIZE,
3705 .ivsize = DES_BLOCK_SIZE,
3706 }
3707 },
3708 .cipher_info = {
3709 .alg = CIPHER_ALG_DES,
3710 .mode = CIPHER_MODE_CBC,
3711 },
3712 .auth_info = {
3713 .alg = HASH_ALG_NONE,
3714 .mode = HASH_MODE_NONE,
3715 },
3716 },
3717 {
3718 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3719 .alg.crypto = {
3720 .cra_name = "ecb(des)",
3721 .cra_driver_name = "ecb-des-iproc",
3722 .cra_blocksize = DES_BLOCK_SIZE,
3723 .cra_ablkcipher = {
3724 .min_keysize = DES_KEY_SIZE,
3725 .max_keysize = DES_KEY_SIZE,
3726 .ivsize = 0,
3727 }
3728 },
3729 .cipher_info = {
3730 .alg = CIPHER_ALG_DES,
3731 .mode = CIPHER_MODE_ECB,
3732 },
3733 .auth_info = {
3734 .alg = HASH_ALG_NONE,
3735 .mode = HASH_MODE_NONE,
3736 },
3737 },
3738 {
3739 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3740 .alg.crypto = {
3741 .cra_name = "ofb(des3_ede)",
3742 .cra_driver_name = "ofb-des3-iproc",
3743 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3744 .cra_ablkcipher = {
3745 .min_keysize = DES3_EDE_KEY_SIZE,
3746 .max_keysize = DES3_EDE_KEY_SIZE,
3747 .ivsize = DES3_EDE_BLOCK_SIZE,
3748 }
3749 },
3750 .cipher_info = {
3751 .alg = CIPHER_ALG_3DES,
3752 .mode = CIPHER_MODE_OFB,
3753 },
3754 .auth_info = {
3755 .alg = HASH_ALG_NONE,
3756 .mode = HASH_MODE_NONE,
3757 },
3758 },
3759 {
3760 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3761 .alg.crypto = {
3762 .cra_name = "cbc(des3_ede)",
3763 .cra_driver_name = "cbc-des3-iproc",
3764 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3765 .cra_ablkcipher = {
3766 .min_keysize = DES3_EDE_KEY_SIZE,
3767 .max_keysize = DES3_EDE_KEY_SIZE,
3768 .ivsize = DES3_EDE_BLOCK_SIZE,
3769 }
3770 },
3771 .cipher_info = {
3772 .alg = CIPHER_ALG_3DES,
3773 .mode = CIPHER_MODE_CBC,
3774 },
3775 .auth_info = {
3776 .alg = HASH_ALG_NONE,
3777 .mode = HASH_MODE_NONE,
3778 },
3779 },
3780 {
3781 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3782 .alg.crypto = {
3783 .cra_name = "ecb(des3_ede)",
3784 .cra_driver_name = "ecb-des3-iproc",
3785 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3786 .cra_ablkcipher = {
3787 .min_keysize = DES3_EDE_KEY_SIZE,
3788 .max_keysize = DES3_EDE_KEY_SIZE,
3789 .ivsize = 0,
3790 }
3791 },
3792 .cipher_info = {
3793 .alg = CIPHER_ALG_3DES,
3794 .mode = CIPHER_MODE_ECB,
3795 },
3796 .auth_info = {
3797 .alg = HASH_ALG_NONE,
3798 .mode = HASH_MODE_NONE,
3799 },
3800 },
3801 {
3802 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3803 .alg.crypto = {
3804 .cra_name = "ofb(aes)",
3805 .cra_driver_name = "ofb-aes-iproc",
3806 .cra_blocksize = AES_BLOCK_SIZE,
3807 .cra_ablkcipher = {
3808 .min_keysize = AES_MIN_KEY_SIZE,
3809 .max_keysize = AES_MAX_KEY_SIZE,
3810 .ivsize = AES_BLOCK_SIZE,
3811 }
3812 },
3813 .cipher_info = {
3814 .alg = CIPHER_ALG_AES,
3815 .mode = CIPHER_MODE_OFB,
3816 },
3817 .auth_info = {
3818 .alg = HASH_ALG_NONE,
3819 .mode = HASH_MODE_NONE,
3820 },
3821 },
3822 {
3823 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3824 .alg.crypto = {
3825 .cra_name = "cbc(aes)",
3826 .cra_driver_name = "cbc-aes-iproc",
3827 .cra_blocksize = AES_BLOCK_SIZE,
3828 .cra_ablkcipher = {
3829 .min_keysize = AES_MIN_KEY_SIZE,
3830 .max_keysize = AES_MAX_KEY_SIZE,
3831 .ivsize = AES_BLOCK_SIZE,
3832 }
3833 },
3834 .cipher_info = {
3835 .alg = CIPHER_ALG_AES,
3836 .mode = CIPHER_MODE_CBC,
3837 },
3838 .auth_info = {
3839 .alg = HASH_ALG_NONE,
3840 .mode = HASH_MODE_NONE,
3841 },
3842 },
3843 {
3844 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3845 .alg.crypto = {
3846 .cra_name = "ecb(aes)",
3847 .cra_driver_name = "ecb-aes-iproc",
3848 .cra_blocksize = AES_BLOCK_SIZE,
3849 .cra_ablkcipher = {
3850 .min_keysize = AES_MIN_KEY_SIZE,
3851 .max_keysize = AES_MAX_KEY_SIZE,
3852 .ivsize = 0,
3853 }
3854 },
3855 .cipher_info = {
3856 .alg = CIPHER_ALG_AES,
3857 .mode = CIPHER_MODE_ECB,
3858 },
3859 .auth_info = {
3860 .alg = HASH_ALG_NONE,
3861 .mode = HASH_MODE_NONE,
3862 },
3863 },
3864 {
3865 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3866 .alg.crypto = {
3867 .cra_name = "ctr(aes)",
3868 .cra_driver_name = "ctr-aes-iproc",
3869 .cra_blocksize = AES_BLOCK_SIZE,
3870 .cra_ablkcipher = {
3871
3872 .min_keysize = AES_MIN_KEY_SIZE,
3873 .max_keysize = AES_MAX_KEY_SIZE,
3874 .ivsize = AES_BLOCK_SIZE,
3875 }
3876 },
3877 .cipher_info = {
3878 .alg = CIPHER_ALG_AES,
3879 .mode = CIPHER_MODE_CTR,
3880 },
3881 .auth_info = {
3882 .alg = HASH_ALG_NONE,
3883 .mode = HASH_MODE_NONE,
3884 },
3885 },
	{
3887 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3888 .alg.crypto = {
3889 .cra_name = "xts(aes)",
3890 .cra_driver_name = "xts-aes-iproc",
3891 .cra_blocksize = AES_BLOCK_SIZE,
3892 .cra_ablkcipher = {
3893 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3894 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3895 .ivsize = AES_BLOCK_SIZE,
3896 }
3897 },
3898 .cipher_info = {
3899 .alg = CIPHER_ALG_AES,
3900 .mode = CIPHER_MODE_XTS,
3901 },
3902 .auth_info = {
3903 .alg = HASH_ALG_NONE,
3904 .mode = HASH_MODE_NONE,
3905 },
3906 },
3907
3908
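	/* AHASH algorithms */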
3909 {
3910 .type = CRYPTO_ALG_TYPE_AHASH,
3911 .alg.hash = {
3912 .halg.digestsize = MD5_DIGEST_SIZE,
3913 .halg.base = {
3914 .cra_name = "md5",
3915 .cra_driver_name = "md5-iproc",
3916 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3917 .cra_flags = CRYPTO_ALG_ASYNC,
3918 }
3919 },
3920 .cipher_info = {
3921 .alg = CIPHER_ALG_NONE,
3922 .mode = CIPHER_MODE_NONE,
3923 },
3924 .auth_info = {
3925 .alg = HASH_ALG_MD5,
3926 .mode = HASH_MODE_HASH,
3927 },
3928 },
3929 {
3930 .type = CRYPTO_ALG_TYPE_AHASH,
3931 .alg.hash = {
3932 .halg.digestsize = MD5_DIGEST_SIZE,
3933 .halg.base = {
3934 .cra_name = "hmac(md5)",
3935 .cra_driver_name = "hmac-md5-iproc",
3936 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3937 }
3938 },
3939 .cipher_info = {
3940 .alg = CIPHER_ALG_NONE,
3941 .mode = CIPHER_MODE_NONE,
3942 },
3943 .auth_info = {
3944 .alg = HASH_ALG_MD5,
3945 .mode = HASH_MODE_HMAC,
3946 },
3947 },
3948 {.type = CRYPTO_ALG_TYPE_AHASH,
3949 .alg.hash = {
3950 .halg.digestsize = SHA1_DIGEST_SIZE,
3951 .halg.base = {
3952 .cra_name = "sha1",
3953 .cra_driver_name = "sha1-iproc",
3954 .cra_blocksize = SHA1_BLOCK_SIZE,
3955 }
3956 },
3957 .cipher_info = {
3958 .alg = CIPHER_ALG_NONE,
3959 .mode = CIPHER_MODE_NONE,
3960 },
3961 .auth_info = {
3962 .alg = HASH_ALG_SHA1,
3963 .mode = HASH_MODE_HASH,
3964 },
3965 },
3966 {.type = CRYPTO_ALG_TYPE_AHASH,
3967 .alg.hash = {
3968 .halg.digestsize = SHA1_DIGEST_SIZE,
3969 .halg.base = {
3970 .cra_name = "hmac(sha1)",
3971 .cra_driver_name = "hmac-sha1-iproc",
3972 .cra_blocksize = SHA1_BLOCK_SIZE,
3973 }
3974 },
3975 .cipher_info = {
3976 .alg = CIPHER_ALG_NONE,
3977 .mode = CIPHER_MODE_NONE,
3978 },
3979 .auth_info = {
3980 .alg = HASH_ALG_SHA1,
3981 .mode = HASH_MODE_HMAC,
3982 },
3983 },
3984 {.type = CRYPTO_ALG_TYPE_AHASH,
3985 .alg.hash = {
3986 .halg.digestsize = SHA224_DIGEST_SIZE,
3987 .halg.base = {
3988 .cra_name = "sha224",
3989 .cra_driver_name = "sha224-iproc",
3990 .cra_blocksize = SHA224_BLOCK_SIZE,
3991 }
3992 },
3993 .cipher_info = {
3994 .alg = CIPHER_ALG_NONE,
3995 .mode = CIPHER_MODE_NONE,
3996 },
3997 .auth_info = {
3998 .alg = HASH_ALG_SHA224,
3999 .mode = HASH_MODE_HASH,
4000 },
4001 },
4002 {.type = CRYPTO_ALG_TYPE_AHASH,
4003 .alg.hash = {
4004 .halg.digestsize = SHA224_DIGEST_SIZE,
4005 .halg.base = {
4006 .cra_name = "hmac(sha224)",
4007 .cra_driver_name = "hmac-sha224-iproc",
4008 .cra_blocksize = SHA224_BLOCK_SIZE,
4009 }
4010 },
4011 .cipher_info = {
4012 .alg = CIPHER_ALG_NONE,
4013 .mode = CIPHER_MODE_NONE,
4014 },
4015 .auth_info = {
4016 .alg = HASH_ALG_SHA224,
4017 .mode = HASH_MODE_HMAC,
4018 },
4019 },
4020 {.type = CRYPTO_ALG_TYPE_AHASH,
4021 .alg.hash = {
4022 .halg.digestsize = SHA256_DIGEST_SIZE,
4023 .halg.base = {
4024 .cra_name = "sha256",
4025 .cra_driver_name = "sha256-iproc",
4026 .cra_blocksize = SHA256_BLOCK_SIZE,
4027 }
4028 },
4029 .cipher_info = {
4030 .alg = CIPHER_ALG_NONE,
4031 .mode = CIPHER_MODE_NONE,
4032 },
4033 .auth_info = {
4034 .alg = HASH_ALG_SHA256,
4035 .mode = HASH_MODE_HASH,
4036 },
4037 },
4038 {.type = CRYPTO_ALG_TYPE_AHASH,
4039 .alg.hash = {
4040 .halg.digestsize = SHA256_DIGEST_SIZE,
4041 .halg.base = {
4042 .cra_name = "hmac(sha256)",
4043 .cra_driver_name = "hmac-sha256-iproc",
4044 .cra_blocksize = SHA256_BLOCK_SIZE,
4045 }
4046 },
4047 .cipher_info = {
4048 .alg = CIPHER_ALG_NONE,
4049 .mode = CIPHER_MODE_NONE,
4050 },
4051 .auth_info = {
4052 .alg = HASH_ALG_SHA256,
4053 .mode = HASH_MODE_HMAC,
4054 },
4055 },
4056 {
4057 .type = CRYPTO_ALG_TYPE_AHASH,
4058 .alg.hash = {
4059 .halg.digestsize = SHA384_DIGEST_SIZE,
4060 .halg.base = {
4061 .cra_name = "sha384",
4062 .cra_driver_name = "sha384-iproc",
4063 .cra_blocksize = SHA384_BLOCK_SIZE,
4064 }
4065 },
4066 .cipher_info = {
4067 .alg = CIPHER_ALG_NONE,
4068 .mode = CIPHER_MODE_NONE,
4069 },
4070 .auth_info = {
4071 .alg = HASH_ALG_SHA384,
4072 .mode = HASH_MODE_HASH,
4073 },
4074 },
4075 {
4076 .type = CRYPTO_ALG_TYPE_AHASH,
4077 .alg.hash = {
4078 .halg.digestsize = SHA384_DIGEST_SIZE,
4079 .halg.base = {
4080 .cra_name = "hmac(sha384)",
4081 .cra_driver_name = "hmac-sha384-iproc",
4082 .cra_blocksize = SHA384_BLOCK_SIZE,
4083 }
4084 },
4085 .cipher_info = {
4086 .alg = CIPHER_ALG_NONE,
4087 .mode = CIPHER_MODE_NONE,
4088 },
4089 .auth_info = {
4090 .alg = HASH_ALG_SHA384,
4091 .mode = HASH_MODE_HMAC,
4092 },
4093 },
4094 {
4095 .type = CRYPTO_ALG_TYPE_AHASH,
4096 .alg.hash = {
4097 .halg.digestsize = SHA512_DIGEST_SIZE,
4098 .halg.base = {
4099 .cra_name = "sha512",
4100 .cra_driver_name = "sha512-iproc",
4101 .cra_blocksize = SHA512_BLOCK_SIZE,
4102 }
4103 },
4104 .cipher_info = {
4105 .alg = CIPHER_ALG_NONE,
4106 .mode = CIPHER_MODE_NONE,
4107 },
4108 .auth_info = {
4109 .alg = HASH_ALG_SHA512,
4110 .mode = HASH_MODE_HASH,
4111 },
4112 },
4113 {
4114 .type = CRYPTO_ALG_TYPE_AHASH,
4115 .alg.hash = {
4116 .halg.digestsize = SHA512_DIGEST_SIZE,
4117 .halg.base = {
4118 .cra_name = "hmac(sha512)",
4119 .cra_driver_name = "hmac-sha512-iproc",
4120 .cra_blocksize = SHA512_BLOCK_SIZE,
4121 }
4122 },
4123 .cipher_info = {
4124 .alg = CIPHER_ALG_NONE,
4125 .mode = CIPHER_MODE_NONE,
4126 },
4127 .auth_info = {
4128 .alg = HASH_ALG_SHA512,
4129 .mode = HASH_MODE_HMAC,
4130 },
4131 },
4132 {
4133 .type = CRYPTO_ALG_TYPE_AHASH,
4134 .alg.hash = {
4135 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4136 .halg.base = {
4137 .cra_name = "sha3-224",
4138 .cra_driver_name = "sha3-224-iproc",
4139 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4140 }
4141 },
4142 .cipher_info = {
4143 .alg = CIPHER_ALG_NONE,
4144 .mode = CIPHER_MODE_NONE,
4145 },
4146 .auth_info = {
4147 .alg = HASH_ALG_SHA3_224,
4148 .mode = HASH_MODE_HASH,
4149 },
4150 },
4151 {
4152 .type = CRYPTO_ALG_TYPE_AHASH,
4153 .alg.hash = {
4154 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4155 .halg.base = {
4156 .cra_name = "hmac(sha3-224)",
4157 .cra_driver_name = "hmac-sha3-224-iproc",
4158 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4159 }
4160 },
4161 .cipher_info = {
4162 .alg = CIPHER_ALG_NONE,
4163 .mode = CIPHER_MODE_NONE,
4164 },
4165 .auth_info = {
4166 .alg = HASH_ALG_SHA3_224,
4167 .mode = HASH_MODE_HMAC
4168 },
4169 },
4170 {
4171 .type = CRYPTO_ALG_TYPE_AHASH,
4172 .alg.hash = {
4173 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4174 .halg.base = {
4175 .cra_name = "sha3-256",
4176 .cra_driver_name = "sha3-256-iproc",
4177 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4178 }
4179 },
4180 .cipher_info = {
4181 .alg = CIPHER_ALG_NONE,
4182 .mode = CIPHER_MODE_NONE,
4183 },
4184 .auth_info = {
4185 .alg = HASH_ALG_SHA3_256,
4186 .mode = HASH_MODE_HASH,
4187 },
4188 },
4189 {
4190 .type = CRYPTO_ALG_TYPE_AHASH,
4191 .alg.hash = {
4192 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4193 .halg.base = {
4194 .cra_name = "hmac(sha3-256)",
4195 .cra_driver_name = "hmac-sha3-256-iproc",
4196 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4197 }
4198 },
4199 .cipher_info = {
4200 .alg = CIPHER_ALG_NONE,
4201 .mode = CIPHER_MODE_NONE,
4202 },
4203 .auth_info = {
4204 .alg = HASH_ALG_SHA3_256,
4205 .mode = HASH_MODE_HMAC,
4206 },
4207 },
4208 {
4209 .type = CRYPTO_ALG_TYPE_AHASH,
4210 .alg.hash = {
4211 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4212 .halg.base = {
4213 .cra_name = "sha3-384",
4214 .cra_driver_name = "sha3-384-iproc",
			.cra_blocksize = SHA3_384_BLOCK_SIZE,
4216 }
4217 },
4218 .cipher_info = {
4219 .alg = CIPHER_ALG_NONE,
4220 .mode = CIPHER_MODE_NONE,
4221 },
4222 .auth_info = {
4223 .alg = HASH_ALG_SHA3_384,
4224 .mode = HASH_MODE_HASH,
4225 },
4226 },
4227 {
4228 .type = CRYPTO_ALG_TYPE_AHASH,
4229 .alg.hash = {
4230 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4231 .halg.base = {
4232 .cra_name = "hmac(sha3-384)",
4233 .cra_driver_name = "hmac-sha3-384-iproc",
4234 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4235 }
4236 },
4237 .cipher_info = {
4238 .alg = CIPHER_ALG_NONE,
4239 .mode = CIPHER_MODE_NONE,
4240 },
4241 .auth_info = {
4242 .alg = HASH_ALG_SHA3_384,
4243 .mode = HASH_MODE_HMAC,
4244 },
4245 },
4246 {
4247 .type = CRYPTO_ALG_TYPE_AHASH,
4248 .alg.hash = {
4249 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4250 .halg.base = {
4251 .cra_name = "sha3-512",
4252 .cra_driver_name = "sha3-512-iproc",
4253 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4254 }
4255 },
4256 .cipher_info = {
4257 .alg = CIPHER_ALG_NONE,
4258 .mode = CIPHER_MODE_NONE,
4259 },
4260 .auth_info = {
4261 .alg = HASH_ALG_SHA3_512,
4262 .mode = HASH_MODE_HASH,
4263 },
4264 },
4265 {
4266 .type = CRYPTO_ALG_TYPE_AHASH,
4267 .alg.hash = {
4268 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4269 .halg.base = {
4270 .cra_name = "hmac(sha3-512)",
4271 .cra_driver_name = "hmac-sha3-512-iproc",
4272 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4273 }
4274 },
4275 .cipher_info = {
4276 .alg = CIPHER_ALG_NONE,
4277 .mode = CIPHER_MODE_NONE,
4278 },
4279 .auth_info = {
4280 .alg = HASH_ALG_SHA3_512,
4281 .mode = HASH_MODE_HMAC,
4282 },
4283 },
4284 {
4285 .type = CRYPTO_ALG_TYPE_AHASH,
4286 .alg.hash = {
4287 .halg.digestsize = AES_BLOCK_SIZE,
4288 .halg.base = {
4289 .cra_name = "xcbc(aes)",
4290 .cra_driver_name = "xcbc-aes-iproc",
4291 .cra_blocksize = AES_BLOCK_SIZE,
4292 }
4293 },
4294 .cipher_info = {
4295 .alg = CIPHER_ALG_NONE,
4296 .mode = CIPHER_MODE_NONE,
4297 },
4298 .auth_info = {
4299 .alg = HASH_ALG_AES,
4300 .mode = HASH_MODE_XCBC,
4301 },
4302 },
4303 {
4304 .type = CRYPTO_ALG_TYPE_AHASH,
4305 .alg.hash = {
4306 .halg.digestsize = AES_BLOCK_SIZE,
4307 .halg.base = {
4308 .cra_name = "cmac(aes)",
4309 .cra_driver_name = "cmac-aes-iproc",
4310 .cra_blocksize = AES_BLOCK_SIZE,
4311 }
4312 },
4313 .cipher_info = {
4314 .alg = CIPHER_ALG_NONE,
4315 .mode = CIPHER_MODE_NONE,
4316 },
4317 .auth_info = {
4318 .alg = HASH_ALG_AES,
4319 .mode = HASH_MODE_CMAC,
4320 },
4321 },
4322};
4323
4324static int generic_cra_init(struct crypto_tfm *tfm,
4325 struct iproc_alg_s *cipher_alg)
4326{
4327 struct spu_hw *spu = &iproc_priv.spu;
4328 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4329 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4330
4331 flow_log("%s()\n", __func__);
4332
4333 ctx->alg = cipher_alg;
4334 ctx->cipher = cipher_alg->cipher_info;
4335 ctx->auth = cipher_alg->auth_info;
4336 ctx->auth_first = cipher_alg->auth_first;
4337 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4338 ctx->cipher.mode,
4339 blocksize);
4340 ctx->fallback_cipher = NULL;
4341
4342 ctx->enckeylen = 0;
4343 ctx->authkeylen = 0;
4344
4345 atomic_inc(&iproc_priv.stream_count);
4346 atomic_inc(&iproc_priv.session_count);
4347
4348 return 0;
4349}
4350
4351static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4352{
4353 struct crypto_alg *alg = tfm->__crt_alg;
4354 struct iproc_alg_s *cipher_alg;
4355
4356 flow_log("%s()\n", __func__);
4357
4358 tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4359
4360 cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4361 return generic_cra_init(tfm, cipher_alg);
4362}
4363
4364static int ahash_cra_init(struct crypto_tfm *tfm)
4365{
4366 int err;
4367 struct crypto_alg *alg = tfm->__crt_alg;
4368 struct iproc_alg_s *cipher_alg;
4369
4370 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4371 alg.hash);
4372
4373 err = generic_cra_init(tfm, cipher_alg);
4374 flow_log("%s()\n", __func__);
4375
4376
4377
4378
4379
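	/*
	 * Keep the exported state small: only the fields copied by
	 * ahash_export() (struct spu_hash_export_s) are part of the state,
	 * not the request's message buffers.
	 */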
4380 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4381 sizeof(struct iproc_reqctx_s));
4382
4383 return err;
4384}
4385
4386static int aead_cra_init(struct crypto_aead *aead)
4387{
4388 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4389 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4390 struct crypto_alg *alg = tfm->__crt_alg;
4391 struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4392 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4393 alg.aead);
4394
4395 int err = generic_cra_init(tfm, cipher_alg);
4396
4397 flow_log("%s()\n", __func__);
4398
4399 crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4400 ctx->is_esp = false;
4401 ctx->salt_len = 0;
4402 ctx->salt_offset = 0;
4403
4404
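	/* random first IV */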
4405 get_random_bytes(ctx->iv, MAX_IV_SIZE);
4406 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
4407
4408 if (!err) {
4409 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4410 flow_log("%s() creating fallback cipher\n", __func__);
4411
4412 ctx->fallback_cipher =
4413 crypto_alloc_aead(alg->cra_name, 0,
4414 CRYPTO_ALG_ASYNC |
4415 CRYPTO_ALG_NEED_FALLBACK);
4416 if (IS_ERR(ctx->fallback_cipher)) {
4417 pr_err("%s() Error: failed to allocate fallback for %s\n",
4418 __func__, alg->cra_name);
4419 return PTR_ERR(ctx->fallback_cipher);
4420 }
4421 }
4422 }
4423
4424 return err;
4425}
4426
4427static void generic_cra_exit(struct crypto_tfm *tfm)
4428{
4429 atomic_dec(&iproc_priv.session_count);
4430}
4431
4432static void aead_cra_exit(struct crypto_aead *aead)
4433{
4434 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4435 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4436
4437 generic_cra_exit(tfm);
4438
4439 if (ctx->fallback_cipher) {
4440 crypto_free_aead(ctx->fallback_cipher);
4441 ctx->fallback_cipher = NULL;
4442 }
4443}
4444
4445
4446
4447
4448
4449
4450
4451
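/**
 * spu_functions_register() - Specify the hardware-specific SPU functions based
 * on the SPU type read from the device tree.
 * @dev:	 device structure
 * @spu_type:	 SPU hardware generation
 * @spu_subtype: SPU hardware version
 */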
4452static void spu_functions_register(struct device *dev,
4453 enum spu_spu_type spu_type,
4454 enum spu_spu_subtype spu_subtype)
4455{
4456 struct spu_hw *spu = &iproc_priv.spu;
4457
4458 if (spu_type == SPU_TYPE_SPUM) {
4459 dev_dbg(dev, "Registering SPUM functions");
4460 spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4461 spu->spu_payload_length = spum_payload_length;
4462 spu->spu_response_hdr_len = spum_response_hdr_len;
4463 spu->spu_hash_pad_len = spum_hash_pad_len;
4464 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4465 spu->spu_assoc_resp_len = spum_assoc_resp_len;
4466 spu->spu_aead_ivlen = spum_aead_ivlen;
4467 spu->spu_hash_type = spum_hash_type;
4468 spu->spu_digest_size = spum_digest_size;
4469 spu->spu_create_request = spum_create_request;
4470 spu->spu_cipher_req_init = spum_cipher_req_init;
4471 spu->spu_cipher_req_finish = spum_cipher_req_finish;
4472 spu->spu_request_pad = spum_request_pad;
4473 spu->spu_tx_status_len = spum_tx_status_len;
4474 spu->spu_rx_status_len = spum_rx_status_len;
4475 spu->spu_status_process = spum_status_process;
4476 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4477 spu->spu_ccm_update_iv = spum_ccm_update_iv;
4478 spu->spu_wordalign_padlen = spum_wordalign_padlen;
4479 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4480 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4481 else
4482 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4483 } else {
4484 dev_dbg(dev, "Registering SPU2 functions");
4485 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4486 spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4487 spu->spu_payload_length = spu2_payload_length;
4488 spu->spu_response_hdr_len = spu2_response_hdr_len;
4489 spu->spu_hash_pad_len = spu2_hash_pad_len;
4490 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4491 spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4492 spu->spu_aead_ivlen = spu2_aead_ivlen;
4493 spu->spu_hash_type = spu2_hash_type;
4494 spu->spu_digest_size = spu2_digest_size;
4495 spu->spu_create_request = spu2_create_request;
4496 spu->spu_cipher_req_init = spu2_cipher_req_init;
4497 spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4498 spu->spu_request_pad = spu2_request_pad;
4499 spu->spu_tx_status_len = spu2_tx_status_len;
4500 spu->spu_rx_status_len = spu2_rx_status_len;
4501 spu->spu_status_process = spu2_status_process;
4502 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4503 spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4504 spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4505 }
4506}
4507
4508
4509
4510
4511
4512
4513
4514
4515
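/**
 * spu_mb_init() - Initialize the mailbox client and request ownership of a
 * mailbox channel for each SPU hardware channel, registering the receive
 * callback used to hand completed responses back to this driver.
 * @dev: SPU driver device structure
 *
 * Return: 0 if successful, negative error code otherwise
 */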
4516static int spu_mb_init(struct device *dev)
4517{
4518 struct mbox_client *mcl = &iproc_priv.mcl;
4519 int err, i;
4520
4521 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4522 sizeof(struct mbox_chan *), GFP_KERNEL);
4523 if (!iproc_priv.mbox)
4524 return -ENOMEM;
4525
4526 mcl->dev = dev;
4527 mcl->tx_block = false;
4528 mcl->tx_tout = 0;
4529 mcl->knows_txdone = true;
4530 mcl->rx_callback = spu_rx_callback;
4531 mcl->tx_done = NULL;
4532
4533 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4534 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4535 if (IS_ERR(iproc_priv.mbox[i])) {
4536 err = (int)PTR_ERR(iproc_priv.mbox[i]);
4537 dev_err(dev,
4538 "Mbox channel %d request failed with err %d",
4539 i, err);
4540 iproc_priv.mbox[i] = NULL;
4541 goto free_channels;
4542 }
4543 }
4544
4545 return 0;
4546free_channels:
4547 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4548 if (iproc_priv.mbox[i])
4549 mbox_free_channel(iproc_priv.mbox[i]);
4550 }
4551
4552 return err;
4553}
4554
4555static void spu_mb_release(struct platform_device *pdev)
4556{
4557 int i;
4558
4559 for (i = 0; i < iproc_priv.spu.num_chan; i++)
4560 mbox_free_channel(iproc_priv.mbox[i]);
4561}
4562
4563static void spu_counters_init(void)
4564{
4565 int i;
4566 int j;
4567
4568 atomic_set(&iproc_priv.session_count, 0);
4569 atomic_set(&iproc_priv.stream_count, 0);
4570 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4571 atomic64_set(&iproc_priv.bytes_in, 0);
4572 atomic64_set(&iproc_priv.bytes_out, 0);
4573 for (i = 0; i < SPU_OP_NUM; i++) {
4574 atomic_set(&iproc_priv.op_counts[i], 0);
4575 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4576 }
4577 for (i = 0; i < CIPHER_ALG_LAST; i++)
4578 for (j = 0; j < CIPHER_MODE_LAST; j++)
4579 atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4580
4581 for (i = 0; i < HASH_ALG_LAST; i++) {
4582 atomic_set(&iproc_priv.hash_cnt[i], 0);
4583 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4584 }
4585 for (i = 0; i < AEAD_TYPE_LAST; i++)
4586 atomic_set(&iproc_priv.aead_cnt[i], 0);
4587
4588 atomic_set(&iproc_priv.mb_no_spc, 0);
4589 atomic_set(&iproc_priv.mb_send_fail, 0);
4590 atomic_set(&iproc_priv.bad_icv, 0);
4591}
4592
4593static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
4594{
4595 struct spu_hw *spu = &iproc_priv.spu;
4596 struct crypto_alg *crypto = &driver_alg->alg.crypto;
4597 int err;
4598
4599
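	/* SPU2 does not support RC4 */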
4600 if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4601 (spu->spu_type == SPU_TYPE_SPU2))
4602 return 0;
4603
4604 crypto->cra_module = THIS_MODULE;
4605 crypto->cra_priority = cipher_pri;
4606 crypto->cra_alignmask = 0;
4607 crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4608 INIT_LIST_HEAD(&crypto->cra_list);
4609
4610 crypto->cra_init = ablkcipher_cra_init;
4611 crypto->cra_exit = generic_cra_exit;
4612 crypto->cra_type = &crypto_ablkcipher_type;
4613 crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4614 CRYPTO_ALG_KERN_DRIVER_ONLY;
4615
4616 crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4617 crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4618 crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4619
4620 err = crypto_register_alg(crypto);
4621
4622 if (err == 0)
4623 driver_alg->registered = true;
4624 pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
4625 return err;
4626}
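
/*
 * spu_register_ahash() - Fill in the common fields of an ahash_alg entry
 * from driver_algs[] and register it, skipping combinations the detected
 * SPU hardware cannot handle.
 */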
static int spu_register_ahash(struct iproc_alg_s *driver_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct ahash_alg *hash = &driver_alg->alg.hash;
	int err;

	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
	    (spu->spu_type == SPU_TYPE_SPUM))
		return 0;

	/* SHA3 algorithm variants are only supported on SPU2 v2 hardware */
	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
		return 0;

	hash->halg.base.cra_module = THIS_MODULE;
	hash->halg.base.cra_priority = hash_pri;
	hash->halg.base.cra_alignmask = 0;
	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	hash->halg.base.cra_init = ahash_cra_init;
	hash->halg.base.cra_exit = generic_cra_exit;
	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
	hash->halg.statesize = sizeof(struct spu_hash_export_s);

	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
		hash->setkey = ahash_setkey;
		hash->init = ahash_init;
		hash->update = ahash_update;
		hash->final = ahash_final;
		hash->finup = ahash_finup;
		hash->digest = ahash_digest;
	} else {
		hash->setkey = ahash_hmac_setkey;
		hash->init = ahash_hmac_init;
		hash->update = ahash_hmac_update;
		hash->final = ahash_hmac_final;
		hash->finup = ahash_hmac_finup;
		hash->digest = ahash_hmac_digest;
	}
	hash->export = ahash_export;
	hash->import = ahash_import;

	err = crypto_register_ahash(hash);

	if (err == 0)
		driver_alg->registered = true;
	pr_debug(" registered ahash %s\n",
		 hash->halg.base.cra_driver_name);
	return err;
}
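
/*
 * spu_register_aead() - Fill in the common fields of an aead_alg entry from
 * driver_algs[] and register it with the crypto API.
 */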
static int spu_register_aead(struct iproc_alg_s *driver_alg)
{
	struct aead_alg *aead = &driver_alg->alg.aead;
	int err;

	aead->base.cra_module = THIS_MODULE;
	aead->base.cra_priority = aead_pri;
	aead->base.cra_alignmask = 0;
	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	INIT_LIST_HEAD(&aead->base.cra_list);

	aead->base.cra_flags |= CRYPTO_ALG_ASYNC;

	/* setkey is not assigned here; it comes from the per-algorithm
	 * entries in driver_algs[]
	 */
	aead->setauthsize = aead_setauthsize;
	aead->encrypt = aead_encrypt;
	aead->decrypt = aead_decrypt;
	aead->init = aead_cra_init;
	aead->exit = aead_cra_exit;

	err = crypto_register_aead(aead);

	if (err == 0)
		driver_alg->registered = true;
	pr_debug(" registered aead %s\n", aead->base.cra_driver_name);
	return err;
}
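
/**
 * spu_algs_register() - Register every algorithm in driver_algs[] with the
 * crypto API, unwinding any successful registrations if one fails.
 * @dev:  SPU platform device
 *
 * Return: 0 if all registrations succeed, error code of the first failure
 *         otherwise
 */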
static int spu_algs_register(struct device *dev)
{
	int i, j;
	int err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = spu_register_ablkcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
			break;
		default:
			dev_err(dev,
				"iproc-crypto: unknown alg type: %d",
				driver_algs[i].type);
			err = -EINVAL;
		}

		if (err) {
			dev_err(dev, "alg registration failed with error %d\n",
				err);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++) {
		/* Skip any algorithm that was not registered */
		if (!driver_algs[j].registered)
			continue;
		switch (driver_algs[j].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&driver_algs[j].alg.crypto);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
			driver_algs[j].registered = false;
			break;
		}
	}
	return err;
}
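
/* ==================== Kernel Platform API ==================== */

/*
 * Static type/subtype descriptors referenced from the of_device_id table
 * below; they tell the rest of the driver which SPU hardware generation
 * was matched.
 */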
static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};

static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ }
};
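
/*
 * Illustrative device tree usage (a sketch only; the node name, register
 * range, and mailbox phandle below are assumptions and depend on the SoC
 * .dtsi):
 *
 *	crypto@612d0000 {
 *		compatible = "brcm,spu2-v2-crypto";
 *		reg = <0x612d0000 0x1000>;
 *		mboxes = <&pdc0 0>;
 *	};
 *
 * Each entry in "mboxes" becomes one mailbox channel in spu_mb_init(), and
 * each "reg" range becomes one mapped SPU in spu_dt_read().
 */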

MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);

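
/**
 * spu_dt_read() - Read SPU configuration from the device tree node: the
 * number of mailbox channels, the matched SPU type/subtype, and the SPU
 * control register ranges, which are ioremapped here.
 * @pdev:  SPU platform device
 *
 * Return: 0 if successful, < 0 otherwise
 */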
static int spu_dt_read(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;

	/* Count the number of mailbox channels */
	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");

	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(&pdev->dev, "Failed to match device\n");
		return -ENODEV;
	}

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;

	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(&pdev->dev, "Failed to map registers: %d\n",
				err);
			spu->reg_vbase[i] = NULL;
			return err;
		}
	}
	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);

	return 0;
}
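
/**
 * bcm_spu_probe() - Initialize the SPU driver for one platform device: read
 * the device tree configuration, set up mailbox channels, select the SPU-M
 * or SPU2 function table, reset counters, create debugfs entries, and
 * register the supported crypto algorithms.
 * @pdev:  SPU platform device
 *
 * Return: 0 if successful, < 0 otherwise
 */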
int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	int err = 0;

	iproc_priv.pdev = pdev;
	platform_set_drvdata(iproc_priv.pdev, &iproc_priv);

	err = spu_dt_read(pdev);
	if (err < 0)
		goto failure;

	err = spu_mb_init(&pdev->dev);
	if (err < 0)
		goto failure;

	if (spu->spu_type == SPU_TYPE_SPUM)
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);

	spu_counters_init();

	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
failure:
	spu_mb_release(pdev);
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
}
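
/**
 * bcm_spu_remove() - Undo bcm_spu_probe(): unregister every algorithm that
 * was successfully registered, remove the debugfs entries, and release the
 * mailbox channels.
 * @pdev:  SPU platform device
 */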
int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/*
		 * Not every entry in driver_algs[] was registered, depending
		 * on whether the hardware is SPU-M or SPU2, so skip any
		 * algorithm that was never registered.
		 */
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&driver_algs[i].alg.crypto);
			dev_dbg(dev, " unregistered cipher %s\n",
				driver_algs[i].alg.crypto.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, " unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, " unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
	return 0;
}

static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		.name = "brcm-spu-crypto",
		.of_match_table = of_match_ptr(bcm_spu_dt_ids),
	},
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");