/*
 * Multi buffer SHA512 algorithm Glue Code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha512_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha512_mb_alg_state;

struct sha512_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}
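
/*
 * Hooks into the AVX2 assembly job manager. Each per-CPU manager keeps
 * multiple SHA512 jobs ("lanes") in flight at once; submit and flush
 * hand back whichever job, if any, has completed.
 */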
static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
			(struct sha512_mb_mgr *state,
			 struct job_sha512 *job);
static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
			(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
			(struct sha512_mb_mgr *state);

inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
			   uint64_t total_len)
{
	uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
	padblock[i] = 0x80;

	/* Pad out to the end of the last (possibly extra) block */
	i += ((SHA512_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA512_PADLENGTHFIELD_SIZE;

#if SHA512_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	/* Append the message length in bits, big-endian */
	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA512_LOG2_BLOCK_SIZE;
}
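
/*
 * Drive a context forward until it either completes, needs more user
 * data, or the job manager has no finished lane to hand back. Called
 * with cstate->work_lock held.
 */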
static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
		(struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed. Copy the
			 * remainder to the extra blocks buffer.
			 */
			copy_len = len & (SHA512_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA512_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA512_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_submit(&mgr->mgr,
							      &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are either on
		 * the last block(s) or we need more user input before
		 * continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha512_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static struct sha512_hash_ctx
	*sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user. If it is not ready, resubmit the job to finish
	 * processing. If sha512_ctx_mgr_resubmit returned a job, it is ready
	 * to be returned. Otherwise, all jobs currently being managed by the
	 * sha512_ctx_mgr still need processing.
	 */
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_get_comp_job(&mgr->mgr);
	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}

static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
{
	sha512_job_mgr_init(&mgr->mgr);
}
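
/*
 * Queue user data into the context: partial data accumulates in the
 * extra-block buffer, whole blocks go to the job manager. Returns the
 * context of whichever job completed on this pass, or NULL if none did.
 */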
static struct sha512_hash_ctx
		*sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
				       struct sha512_hash_ctx *ctx,
				       const void *buffer,
				       uint32_t len,
				       int flags)
{
	struct sha512_ctx_mgr *mgr;
	unsigned long irqflags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, irqflags);
	if (flags & ~(HASH_UPDATE | HASH_LAST)) {
		/* User should not pass anything other than UPDATE or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		goto unlock;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		goto unlock;
	}

	if (ctx->status & HASH_CTX_STS_COMPLETE) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		goto unlock;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently
	 * being processed. These will be used later when the request is
	 * completed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from the user buffer into
		 * the extra block
		 */
		uint32_t copy_len = SHA512_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[
					ctx->partial_block_buffer_length],
			       buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *) buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here
		 */
		assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
unlock:
	spin_unlock_irqrestore(&cstate->work_lock, irqflags);
	return ctx;
}
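
/*
 * Force the job manager to start whatever lanes it is still holding
 * back. Returns the first context that finishes, or NULL once nothing
 * is in flight.
 */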
static struct sha512_hash_ctx
		*sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
{
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	while (1) {
		ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			break;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha512_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha512_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed
		 * by the sha512_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			break;
	}
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}

static int sha512_mb_init(struct ahash_request *areq)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA512_H0;
	sctx->job.result_digest[1] = SHA512_H1;
	sctx->job.result_digest[2] = SHA512_H2;
	sctx->job.result_digest[3] = SHA512_H3;
	sctx->job.result_digest[4] = SHA512_H4;
	sctx->job.result_digest[5] = SHA512_H5;
	sctx->job.result_digest[6] = SHA512_H6;
	sctx->job.result_digest[7] = SHA512_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be64 *dst = (__be64 *) rctx->out;

	/* The digest is kept in host order internally; byte-swap on copy */
	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be64(sctx->job.result_digest[i]);

	return 0;
}
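
/*
 * Walk the remaining scatterlist segments of a request whose first
 * submit already went through, feeding each segment to the context
 * manager with the FPU enabled. With flush == true, stalled lanes are
 * flushed so the walk can always make progress.
 */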
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha512_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha512_hash_ctx *)
					ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha512_ctx_mgr_flush(cstate);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha512_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}
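
/*
 * Complete one request: take it off the per-CPU work list, invoke its
 * completion callback, then drain any other jobs that finished in the
 * meantime so their completions run too.
 */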
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;
	unsigned long flags;

	/* remove from work list */
	spin_lock_irqsave(&cstate->work_lock, flags);
	list_del(&rctx->waiter);
	spin_unlock_irqrestore(&cstate->work_lock, flags);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock_irqsave(&cstate->work_lock, flags);
			list_del(&req_ctx->waiter);
			spin_unlock_irqrestore(&cstate->work_lock, flags);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	}

	return 0;
}

static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			       struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
	unsigned long flags;

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock_irqsave(&cstate->work_lock, flags);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock_irqrestore(&cstate->work_lock, flags);

	mcryptd_arm_flusher(cstate, delay);
}
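
/*
 * ahash entry points for the internal __sha512-mb algorithm. Each runs
 * on the CPU the request was queued to (the cpu-clash check below is a
 * sanity check), submits data with the FPU enabled, and returns
 * -EINPROGRESS while the job still sits in a lane.
 */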
static int sha512_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
					nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
					nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct sha512_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);

	/* flag HASH_FINAL and 0 data size */
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_export(struct ahash_request *areq, void *out)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha512_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha512_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 sizeof(struct sha512_hash_ctx));

	return 0;
}

static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
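
/*
 * Two algorithms are registered: "__sha512-mb", the internal algorithm
 * driven through mcryptd, and "sha512_mb" further below, the public
 * async wrapper that forwards every operation to its mcryptd child.
 */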
static struct ahash_alg sha512_mb_areq_alg = {
	.init		= sha512_mb_init,
	.update		= sha512_mb_update,
	.final		= sha512_mb_final,
	.finup		= sha512_mb_finup,
	.export		= sha512_mb_export,
	.import		= sha512_mb_import,
	.halg		= {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct sha512_hash_ctx),
		.base		= {
			.cra_name	 = "__sha512-mb",
			.cra_driver_name = "__intel_sha512-mb",
			.cra_priority	 = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_INTERNAL,
			.cra_blocksize	= SHA512_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha512_mb_areq_alg.halg.base.cra_list),
			.cra_init	= sha512_mb_areq_init_tfm,
			.cra_exit	= sha512_mb_areq_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha512_hash_ctx),
		}
	}
};

static int sha512_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha512_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha512_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha512_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha512_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha512_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha512_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);

	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha512_mb_async_alg = {
	.init		= sha512_mb_async_init,
	.update		= sha512_mb_async_update,
	.final		= sha512_mb_async_final,
	.finup		= sha512_mb_async_finup,
	.digest		= sha512_mb_async_digest,
	.export		= sha512_mb_async_export,
	.import		= sha512_mb_async_import,
	.halg		= {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct sha512_hash_ctx),
		.base		= {
			.cra_name	 = "sha512",
			.cra_driver_name = "sha512_mb",
			.cra_priority	 = 200,
			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
					  CRYPTO_ALG_ASYNC,
			.cra_blocksize	= SHA512_BLOCK_SIZE,
			.cra_type	= &crypto_ahash_type,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha512_mb_async_alg.halg.base.cra_list),
			.cra_init	= sha512_mb_async_init_tfm,
			.cra_exit	= sha512_mb_async_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha512_mb_ctx),
			.cra_alignmask	= 0,
		},
	},
};
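
/*
 * Periodic flusher, run from the mcryptd flusher work. Any request
 * that has waited past its FLUSH_INTERVAL expiry is forced through the
 * lanes and completed; the timer is then re-armed for the next oldest
 * waiter.
 */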
static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha512_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha512_hash_ctx *)
					sha512_ctx_mgr_flush(cstate);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha512_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}
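
/*
 * Module init: requires AVX2 and BMI2. Sets up one context manager and
 * work list per possible CPU, points the job-manager hooks at the AVX2
 * assembly implementations, and registers both algorithms.
 */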
static int __init sha512_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha512_mb_alg_state.alg_cstate =
				alloc_percpu(struct mcryptd_alg_cstate);

	sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
	sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
	sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
	sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;

	if (!sha512_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha512_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
					 GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha512_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha512_mb_alg_state.flusher = &sha512_mb_flusher;

	err = crypto_register_ahash(&sha512_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha512_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha512_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha512_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha512_mb_async_alg);
	crypto_unregister_ahash(&sha512_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
}

module_init(sha512_mb_mod_init);
module_exit(sha512_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS("sha512");