/*
 * Support for Marvell's cryptographic engine (CESA).
 *
 * Author:  Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
 * License: GPL
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF
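/*
 * Hash requests whose total length exceeds MAX_HW_HASH_SIZE cannot be
 * finished by the engine (apparently the MAC total-length field is limited
 * to 16 bits), so the final digest is computed by the software fallback
 * instead.
 */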

/*
 * Engine state machine (see queue_manag(), crypto_int() and
 * dequeue_complete_req()):
 *
 *   IDLE -> new request -> BUSY -> interrupt -> W_DEQUEUE -> more data: BUSY
 *                                                         -> done:      IDLE
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - per-request bookkeeping for the SRAM copy helpers
 * @src_sg_it:		sg iterator for the source scatterlist
 * @dst_sg_it:		sg iterator for the destination scatterlist
 * @complete:		called when the whole request has been processed
 * @process:		called to start the next hardware chunk
 * @sg_src_left:	bytes left in the current source sg entry
 * @src_start:		offset into the current source sg entry
 * @crypt_len:		length of the chunk currently staged in SRAM
 * @hw_nbytes:		total bytes the hardware has to process
 * @copy_back:		copy output back from SRAM (crypt) or not (hash)
 * @sg_dst_left:	bytes left in the current destination sg entry
 * @dst_start:		offset into the current destination sg entry
 * @hw_processed_bytes:	bytes already processed by the hardware
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};

struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
};

static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
		       4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}

static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copy_len;

	while (len) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		copy_len = min(p->sg_src_left, len);
		memcpy(dbuf, sbuf, copy_len);

		p->src_start += copy_len;
		p->sg_src_left -= copy_len;

		len -= copy_len;
		dbuf += copy_len;
	}
}

static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);

	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}
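
/*
 * Data is staged through the on-chip SRAM window: setup_data_in() copies up
 * to max_req_size bytes of the remaining request from the source scatterlist
 * to SRAM_DATA_IN_START (past any bytes already buffered there), and
 * crypt_len records how much the next hardware pass will consume.
 */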
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
		       AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
		       AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
	       sizeof(struct sec_accel_config));

	/* GO: completion is signalled by the accelerator interrupt */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	/* CBC: hand the updated IV back to the caller for chaining */
	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

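/*
 * Hash requests larger than one SRAM window are processed as hardware
 * "fragments": the first chunk is flagged FIRST_FRAG (or NOT_FRAG when the
 * whole request fits), later chunks use MID_FRAG/LAST_FRAG and reload the
 * intermediate digest from req_ctx->state into the DIGEST_INITIAL_VAL
 * registers before the engine is restarted.
 */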
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
		       tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
		break;
	}

	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) |
		MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;

		writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
		writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
		writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
		writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
		writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	/* GO: completion is signalled by the accelerator interrupt */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}

static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 state import is supported here */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}

static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else
			mv_hash_final_fallback(req);
	} else {
		ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
		ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
		ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
		ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
		ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
	}
}

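/*
 * Called from the queue thread once the interrupt has flagged the engine as
 * ENGINE_W_DEQUEUE: copy any output back from SRAM to the destination
 * scatterlist, then either stage the next chunk or complete the request.
 */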
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;

		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process the next chunk */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}
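
/*
 * Count how many scatterlist entries are needed to cover total_bytes; the
 * result is only used to bound the sg mapping iterators started below.
 */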
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (sl) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	int num_sgs, hw_bytes, old_extra_bytes, rc;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		if (unlikely(old_extra_bytes)) {
			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
			       old_extra_bytes);
			p->crypt_len = old_extra_bytes;
		}

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}

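/*
 * The queue thread is the only context that talks to the engine.  It is
 * woken either by mv_handle_req() when a new request is enqueued or by
 * crypto_int() when the hardware has finished a chunk, and it drives the
 * IDLE -> BUSY -> W_DEQUEUE state machine accordingly.
 */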
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    ablkcipher_request_cast(async_req);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}

static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;

	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}
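
/*
 * HMAC keys are folded into precomputed inner/outer SHA1 states: the key is
 * XORed with the ipad/opad constants, one block of each is hashed with the
 * base "sha1" shash, and the exported states are stored in ctx->ivs so the
 * engine can load them as HMAC IVs (mirroring what the generic hmac template
 * does in software).
 */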
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/*
	 * There is no way to extract the ipad/opad from the fallback tfm,
	 * so recompute them here the same way the generic hmac module does.
	 */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned int i;
		char ipad[ss];
		char opad[ss];

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err = crypto_shash_digest(&desc.shash, key, keylen,
						  ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

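/*
 * Interrupt handler: acknowledge SEC_INT_ACCEL0_DONE, mark the engine as
 * ready for dequeueing and kick the queue thread, which does the actual
 * copy-back and completion outside of interrupt context.
 */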
irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};

struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags =
			    CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags =
			    CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_hmac_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			  cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");