/*
 * Multi buffer SHA1 algorithm Glue Code
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha1_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

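/*
 * The mcryptd request context embeds an ahash_request whose __ctx area
 * holds the sha1_hash_ctx.  These helpers convert between the two views
 * so a completed job can be mapped back to its originating request.
 */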
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}

static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
			(struct sha1_mb_mgr *state, struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
			(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
			(struct sha1_mb_mgr *state);

static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
				uint64_t total_len)
{
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}

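/*
 * sha1_ctx_mgr_resubmit: keep feeding a context's buffered data back into
 * the multi-buffer job manager until the job either completes or has to
 * wait for more user data or free lanes.
 */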
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
						struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy the remainder to the extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {

				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha1_hash_ctx *)
					sha1_job_mgr_submit(&mgr->mgr,
							    &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha1_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static struct sha1_hash_ctx
		*sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user.
	 * If it is not ready, resubmit the job to finish processing.
	 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned.  Otherwise, all jobs currently being managed by the
	 * sha1_ctx_mgr still need processing.
	 */
	struct sha1_hash_ctx *ctx;

	ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
	sha1_job_mgr_init(&mgr->mgr);
}

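/*
 * sha1_ctx_mgr_submit: queue (more) user data for one hash context.  Whole
 * blocks are handed to the multi-buffer job manager; any trailing partial
 * block is buffered until a full block (or HASH_LAST) arrives.
 */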
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
						 struct sha1_hash_ctx *ctx,
						 const void *buffer,
						 uint32_t len,
						 int flags)
{
	if (flags & ~(HASH_UPDATE | HASH_LAST)) {
		/* User should not pass anything other than UPDATE or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_COMPLETE) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit.
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from the user buffer into
		 * the extra block.
		 */
		uint32_t copy_len = SHA1_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here.
		 */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
	struct sha1_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

		/* If flush returned NULL, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned.  Otherwise, all jobs currently being managed
		 * by the sha1_ctx_mgr still need processing.  Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha1_mb_init(struct ahash_request *areq)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA1_H0;
	sctx->job.result_digest[1] = SHA1_H1;
	sctx->job.result_digest[2] = SHA1_H2;
	sctx->job.result_digest[3] = SHA1_H3;
	sctx->job.result_digest[4] = SHA1_H4;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 5; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

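/*
 * sha_finish_walk: continue walking the request's scatterlist data and
 * submitting it until either the walk is done (copying out the digest if
 * this was a final request) or the job has to wait for other lanes.
 */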
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha1_hash_ctx *)
					ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
					      rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

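/*
 * sha_complete_job: report completion of one request to its caller, then
 * drain any other jobs that finished in the meantime and complete their
 * requests as well.
 */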
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			     struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}

static int sha1_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	sha1_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);

	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
				      HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_export(struct ahash_request *areq, void *out)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha1_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 sizeof(struct sha1_hash_ctx));

	return 0;
}

static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha1_mb_areq_alg = {
	.init		= sha1_mb_init,
	.update		= sha1_mb_update,
	.final		= sha1_mb_final,
	.finup		= sha1_mb_finup,
	.export		= sha1_mb_export,
	.import		= sha1_mb_import,
	.halg		= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct sha1_hash_ctx),
		.base		= {
			.cra_name	 = "__sha1-mb",
			.cra_driver_name = "__intel_sha1-mb",
			.cra_priority	 = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
					   CRYPTO_ALG_ASYNC |
					   CRYPTO_ALG_INTERNAL,
			.cra_blocksize	 = SHA1_BLOCK_SIZE,
			.cra_module	 = THIS_MODULE,
			.cra_list	 = LIST_HEAD_INIT
					(sha1_mb_areq_alg.halg.base.cra_list),
			.cra_init	 = sha1_mb_areq_init_tfm,
			.cra_exit	 = sha1_mb_areq_exit_tfm,
			.cra_ctxsize	 = sizeof(struct sha1_hash_ctx),
		}
	}
};

static int sha1_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha1_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha1_mb_async_alg = {
	.init		= sha1_mb_async_init,
	.update		= sha1_mb_async_update,
	.final		= sha1_mb_async_final,
	.finup		= sha1_mb_async_finup,
	.digest		= sha1_mb_async_digest,
	.export		= sha1_mb_async_export,
	.import		= sha1_mb_async_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct sha1_hash_ctx),
		.base = {
			.cra_name	 = "sha1",
			.cra_driver_name = "sha1_mb",
			.cra_priority	 = 200,
			.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
					   CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = SHA1_BLOCK_SIZE,
			.cra_type	 = &crypto_ahash_type,
			.cra_module	 = THIS_MODULE,
			.cra_list	 = LIST_HEAD_INIT
					(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init	 = sha1_mb_async_init_tfm,
			.cra_exit	 = sha1_mb_async_exit_tfm,
			.cra_ctxsize	 = sizeof(struct sha1_mb_ctx),
			.cra_alignmask	 = 0,
		},
	},
};

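/*
 * sha1_mb_flusher: periodic flusher run via mcryptd.  Forces out jobs whose
 * flush deadline has passed so a lone request is not stuck waiting for the
 * other lanes to fill, then re-arms the flusher for the next deadline.
 */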
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha1_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha1_hash_ctx *)
					sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha1_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

	sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
	sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
	sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
	sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

	if (!sha1_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha1_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
					 GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha1_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

	err = crypto_register_ahash(&sha1_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha1_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha1_mb_async_alg);
	crypto_unregister_ahash(&sha1_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
}

module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha1");