linux/arch/x86/crypto/sha1-mb/sha1_mb.c
/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *      Tim Chen <tim.c.chen@linux.intel.com>
 *
 *  BSD LICENSE
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha1_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
        struct mcryptd_ahash *mcryptd_tfm;
};

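/*
 * Layout note (implied by the container_of() calls below): the
 * sha1_hash_ctx sits at the start of the __ctx area of the
 * ahash_request embedded in a mcryptd_hash_request_ctx.  The two
 * helpers walk back out from an inner pointer to the enclosing
 * request and request context.
 */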
static inline struct mcryptd_hash_request_ctx
                *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
        struct ahash_request *areq;

        areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
        return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
                *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
        return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
                                struct ahash_request *areq)
{
        rctx->flag = HASH_UPDATE;
}

static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
                        (struct sha1_mb_mgr *state, struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
                                                (struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
                                                (struct sha1_mb_mgr *state);

static inline void sha1_init_digest(uint32_t *digest)
{
        static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
                                        SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
        memcpy(digest, initial_digest, sizeof(initial_digest));
}

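/*
 * Build the SHA-1 trailer in the extra-block buffer: a 0x80 byte, zero
 * fill, then the total message length in bits as a 64-bit big-endian
 * value at the end of the (SHA1_PADLENGTHFIELD_SIZE-byte) length field
 * defined in sha1_mb_ctx.h.  Returns how many extra blocks (1 or 2)
 * remain to be hashed; e.g. a message ending with 60 bytes in its final
 * block leaves no room for the length field, so the padding spills into
 * a second block and 2 is returned.
 */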
static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
                         uint64_t total_len)
{
        uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

        memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
        padblock[i] = 0x80;

        i += ((SHA1_BLOCK_SIZE - 1) &
              (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
             + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
        *((uint64_t *) &padblock[i - 16]) = 0;
#endif

        *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

        /* Number of extra blocks to hash */
        return i >> SHA1_LOG2_BLOCK_SIZE;
}

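/*
 * Keep feeding a context back to the job manager until it completes
 * (HASH_CTX_STS_COMPLETE), goes idle waiting for more user data, or
 * the submit returns a different lane's context that finished in the
 * meantime, in which case the loop continues with that one.
 */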
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
                                                struct sha1_hash_ctx *ctx)
{
        while (ctx) {
                if (ctx->status & HASH_CTX_STS_COMPLETE) {
                        /* Clear PROCESSING bit */
                        ctx->status = HASH_CTX_STS_COMPLETE;
                        return ctx;
                }

                /*
                 * If the extra blocks are empty, begin hashing what remains
                 * in the user's buffer.
                 */
                if (ctx->partial_block_buffer_length == 0 &&
                    ctx->incoming_buffer_length) {

                        const void *buffer = ctx->incoming_buffer;
                        uint32_t len = ctx->incoming_buffer_length;
                        uint32_t copy_len;

                        /*
                         * Only entire blocks can be hashed.
                         * Copy remainder to extra blocks buffer.
                         */
                        copy_len = len & (SHA1_BLOCK_SIZE-1);

                        if (copy_len) {
                                len -= copy_len;
                                memcpy(ctx->partial_block_buffer,
                                       ((const char *) buffer + len),
                                       copy_len);
                                ctx->partial_block_buffer_length = copy_len;
                        }

                        ctx->incoming_buffer_length = 0;

                        /* len should be a multiple of the block size now */
                        assert((len % SHA1_BLOCK_SIZE) == 0);

                        /* Set len to the number of blocks to be hashed */
                        len >>= SHA1_LOG2_BLOCK_SIZE;

                        if (len) {

                                ctx->job.buffer = (uint8_t *) buffer;
                                ctx->job.len = len;
                                ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
                                                                                &ctx->job);
                                continue;
                        }
                }

                /*
                 * If the extra blocks are not empty, then we are
                 * either on the last block(s) or we need more
                 * user input before continuing.
                 */
                if (ctx->status & HASH_CTX_STS_LAST) {

                        uint8_t *buf = ctx->partial_block_buffer;
                        uint32_t n_extra_blocks =
                                        sha1_pad(buf, ctx->total_length);

                        ctx->status = (HASH_CTX_STS_PROCESSING |
                                       HASH_CTX_STS_COMPLETE);
                        ctx->job.buffer = buf;
                        ctx->job.len = (uint32_t) n_extra_blocks;
                        ctx = (struct sha1_hash_ctx *)
                                sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
                        continue;
                }

                ctx->status = HASH_CTX_STS_IDLE;
                return ctx;
        }

        return NULL;
}

static struct sha1_hash_ctx
                        *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
        /*
         * If get_comp_job returns NULL, there are no jobs complete.
         * If get_comp_job returns a job, verify that it is safe to return to
         * the user.
         * If it is not ready, resubmit the job to finish processing.
         * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
         * Otherwise, all jobs currently being managed by the hash_ctx_mgr
         * still need processing.
         */
        struct sha1_hash_ctx *ctx;

        ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
        return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
        sha1_job_mgr_init(&mgr->mgr);
}

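/*
 * Serial front end to the parallel job manager.  Chunks are tagged
 * HASH_FIRST/HASH_UPDATE/HASH_LAST (HASH_ENTIRE, from sha1_mb_ctx.h,
 * is the mask of valid flag bits); partial blocks are staged in
 * partial_block_buffer so that only whole blocks reach the assembler
 * job manager.
 */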
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
                                          struct sha1_hash_ctx *ctx,
                                          const void *buffer,
                                          uint32_t len,
                                          int flags)
{
        if (flags & (~HASH_ENTIRE)) {
                /*
                 * User should not pass anything other than FIRST, UPDATE, or
                 * LAST
                 */
                ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
                return ctx;
        }

        if (ctx->status & HASH_CTX_STS_PROCESSING) {
                /* Cannot submit to a currently processing job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
                return ctx;
        }

        if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
                /* Cannot update a finished job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
                return ctx;
        }


        if (flags & HASH_FIRST) {
                /* Init digest */
                sha1_init_digest(ctx->job.result_digest);

                /* Reset byte counter */
                ctx->total_length = 0;

                /* Clear extra blocks */
                ctx->partial_block_buffer_length = 0;
        }

        /*
         * If we made it here, there were no errors during this call to
         * submit
         */
        ctx->error = HASH_CTX_ERROR_NONE;

        /* Store buffer ptr info from user */
        ctx->incoming_buffer = buffer;
        ctx->incoming_buffer_length = len;

        /*
         * Store the user's request flags and mark this ctx as currently
         * being processed.
         */
        ctx->status = (flags & HASH_LAST) ?
                        (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
                        HASH_CTX_STS_PROCESSING;

        /* Advance byte counter */
        ctx->total_length += len;

        /*
         * If there is anything currently buffered in the extra blocks,
         * append to it until it contains a whole block.
         * Or if the user's buffer contains less than a whole block,
         * append as much as possible to the extra block.
         */
        if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
                /*
                 * Compute how many bytes to copy from user buffer into
                 * extra block
                 */
                uint32_t copy_len = SHA1_BLOCK_SIZE -
                                        ctx->partial_block_buffer_length;
                if (len < copy_len)
                        copy_len = len;

                if (copy_len) {
                        /* Copy and update relevant pointers and counters */
                        memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
                                buffer, copy_len);

                        ctx->partial_block_buffer_length += copy_len;
                        ctx->incoming_buffer = (const void *)
                                        ((const char *)buffer + copy_len);
                        ctx->incoming_buffer_length = len - copy_len;
                }

                /*
                 * The extra block should never contain more than 1 block
                 * here
                 */
                assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

                /*
                 * If the extra block buffer contains exactly 1 block, it can
                 * be hashed.
                 */
                if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
                        ctx->partial_block_buffer_length = 0;

                        ctx->job.buffer = ctx->partial_block_buffer;
                        ctx->job.len = 1;
                        ctx = (struct sha1_hash_ctx *)
                                sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
                }
        }

        return sha1_ctx_mgr_resubmit(mgr, ctx);
}

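/*
 * Force jobs out of partially filled lanes.  This loops because a
 * flushed job may still need its padding blocks hashed by
 * sha1_ctx_mgr_resubmit() before any context is ready to return.
 */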
static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
        struct sha1_hash_ctx *ctx;

        while (1) {
                ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

                /* If flush returned 0, there are no more jobs in flight. */
                if (!ctx)
                        return NULL;

                /*
                 * If flush returned a job, resubmit the job to finish
                 * processing.
                 */
                ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

                /*
                 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
                 * returned. Otherwise, all jobs currently being managed by the
                 * sha1_ctx_mgr still need processing. Loop.
                 */
                if (ctx)
                        return ctx;
        }
}

static int sha1_mb_init(struct ahash_request *areq)
{
        struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

        hash_ctx_init(sctx);
        sctx->job.result_digest[0] = SHA1_H0;
        sctx->job.result_digest[1] = SHA1_H1;
        sctx->job.result_digest[2] = SHA1_H2;
        sctx->job.result_digest[3] = SHA1_H3;
        sctx->job.result_digest[4] = SHA1_H4;
        sctx->total_length = 0;
        sctx->partial_block_buffer_length = 0;
        sctx->status = HASH_CTX_STS_IDLE;

        return 0;
}

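/*
 * SHA-1 output is big-endian; the job manager keeps the digest words in
 * host order, so swap each of the five 32-bit words on the way out.
 */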
static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
        int     i;
        struct  sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
        __be32  *dst = (__be32 *) rctx->out;

        for (i = 0; i < 5; ++i)
                dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

        return 0;
}

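/*
 * Drive the scatterlist walk for one request to completion.  Each chunk
 * is submitted between kernel_fpu_begin()/kernel_fpu_end() since the
 * job manager runs AVX2 code.  If nothing completes (and, when @flush
 * is set, nothing can be flushed either), *ret_rctx is set to NULL to
 * tell the caller the request is still in flight.
 */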
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
                        struct mcryptd_alg_cstate *cstate, bool flush)
{
        int     flag = HASH_UPDATE;
        int     nbytes, err = 0;
        struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
        struct sha1_hash_ctx *sha_ctx;

        /* more work ? */
        while (!(rctx->flag & HASH_DONE)) {
                nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
                if (nbytes < 0) {
                        err = nbytes;
                        goto out;
                }
                /* check if the walk is done */
                if (crypto_ahash_walk_last(&rctx->walk)) {
                        rctx->flag |= HASH_DONE;
                        if (rctx->flag & HASH_FINAL)
                                flag |= HASH_LAST;

                }
                sha_ctx = (struct sha1_hash_ctx *)
                                                ahash_request_ctx(&rctx->areq);
                kernel_fpu_begin();
                sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
                                                rctx->walk.data, nbytes, flag);
                if (!sha_ctx) {
                        if (flush)
                                sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
                }
                kernel_fpu_end();
                if (sha_ctx)
                        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                else {
                        rctx = NULL;
                        goto out;
                }
        }

        /* copy the results */
        if (rctx->flag & HASH_FINAL)
                sha1_mb_set_results(rctx);

out:
        *ret_rctx = rctx;
        return err;
}

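/*
 * Complete one request (softirqs are disabled around the callback
 * unless interrupts are already off), then retire every other job the
 * manager has since finished; one completion can thus drain a whole
 * batch of lanes.
 */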
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
                            struct mcryptd_alg_cstate *cstate,
                            int err)
{
        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        struct mcryptd_hash_request_ctx *req_ctx;
        int ret;

        /* remove from work list */
        spin_lock(&cstate->work_lock);
        list_del(&rctx->waiter);
        spin_unlock(&cstate->work_lock);

        if (irqs_disabled())
                rctx->complete(&req->base, err);
        else {
                local_bh_disable();
                rctx->complete(&req->base, err);
                local_bh_enable();
        }

        /* check to see if there are other jobs that are done */
        sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
        while (sha_ctx) {
                req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                ret = sha_finish_walk(&req_ctx, cstate, false);
                if (req_ctx) {
                        spin_lock(&cstate->work_lock);
                        list_del(&req_ctx->waiter);
                        spin_unlock(&cstate->work_lock);

                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
                                req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
                                req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
                sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
        }

        return 0;
}

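/*
 * Tag the request with an arrival time and queue it on the per-cpu work
 * list so the flusher can force it out of its lane if nothing else
 * completes it within FLUSH_INTERVAL microseconds.
 */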
static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
                             struct mcryptd_alg_cstate *cstate)
{
        unsigned long next_flush;
        unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

        /* initialize tag */
        rctx->tag.arrival = jiffies;    /* tag the arrival time */
        rctx->tag.seq_num = cstate->next_seq_num++;
        next_flush = rctx->tag.arrival + delay;
        rctx->tag.expire = next_flush;

        spin_lock(&cstate->work_lock);
        list_add_tail(&rctx->waiter, &cstate->work_list);
        spin_unlock(&cstate->work_lock);

        mcryptd_arm_flusher(cstate, delay);
}

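/*
 * The update/finup/final entry points below share one shape: start the
 * ahash walk, queue the request for the flusher, submit the first chunk
 * to the job manager inside a kernel_fpu section, then either return
 * -EINPROGRESS (the job is parked in a lane) or finish the walk and
 * complete the request synchronously.
 */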
static int sha1_mb_update(struct ahash_request *areq)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(areq, struct mcryptd_hash_request_ctx, areq);
        struct mcryptd_alg_cstate *cstate =
                                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        int ret = 0, nbytes;


        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, areq);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);

        if (nbytes < 0) {
                ret = nbytes;
                goto done;
        }

        if (crypto_ahash_walk_last(&rctx->walk))
                rctx->flag |= HASH_DONE;

        /* submit */
        sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
        sha1_mb_add_list(rctx, cstate);
        kernel_fpu_begin();
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
                                                        nbytes, HASH_UPDATE);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);

        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha1_mb_finup(struct ahash_request *areq)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(areq, struct mcryptd_hash_request_ctx, areq);
        struct mcryptd_alg_cstate *cstate =
                                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        int ret = 0, flag = HASH_UPDATE, nbytes;

        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, areq);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);

        if (nbytes < 0) {
                ret = nbytes;
                goto done;
        }

        if (crypto_ahash_walk_last(&rctx->walk)) {
                rctx->flag |= HASH_DONE;
                flag = HASH_LAST;
        }

        /* submit */
        rctx->flag |= HASH_FINAL;
        sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
        sha1_mb_add_list(rctx, cstate);

        kernel_fpu_begin();
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
                                                                nbytes, flag);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);
        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha1_mb_final(struct ahash_request *areq)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(areq, struct mcryptd_hash_request_ctx, areq);
        struct mcryptd_alg_cstate *cstate =
                                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct sha1_hash_ctx *sha_ctx;
        int ret = 0;
        u8 data;

        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, areq);

        rctx->flag |= HASH_DONE | HASH_FINAL;

        sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
        /* flag HASH_FINAL and 0 data size */
        sha1_mb_add_list(rctx, cstate);
        kernel_fpu_begin();
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
                                                                HASH_LAST);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);
        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

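/*
 * The exported state is the raw sha1_hash_ctx, which is why .statesize
 * in both algorithm definitions below is sizeof(struct sha1_hash_ctx).
 */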
static int sha1_mb_export(struct ahash_request *areq, void *out)
{
        struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int sha1_mb_import(struct ahash_request *areq, const void *in)
{
        struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}

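/*
 * The outer "sha1_mb" tfm holds a mcryptd handle to the internal
 * "__intel_sha1-mb" algorithm; mcryptd is the daemon that feeds
 * requests to the per-cpu context managers set up at module init.
 */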
static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
        struct mcryptd_ahash *mcryptd_tfm;
        struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
        struct mcryptd_hash_ctx *mctx;

        mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
                                                CRYPTO_ALG_INTERNAL,
                                                CRYPTO_ALG_INTERNAL);
        if (IS_ERR(mcryptd_tfm))
                return PTR_ERR(mcryptd_tfm);
        mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
        mctx->alg_state = &sha1_mb_alg_state;
        ctx->mcryptd_tfm = mcryptd_tfm;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                sizeof(struct ahash_request) +
                                crypto_ahash_reqsize(&mcryptd_tfm->base));

        return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
        struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

        mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                sizeof(struct ahash_request) +
                                sizeof(struct sha1_hash_ctx));

        return 0;
}

static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
        struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

        mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha1_mb_areq_alg = {
        .init           =       sha1_mb_init,
        .update         =       sha1_mb_update,
        .final          =       sha1_mb_final,
        .finup          =       sha1_mb_finup,
        .export         =       sha1_mb_export,
        .import         =       sha1_mb_import,
        .halg           =       {
                .digestsize     =       SHA1_DIGEST_SIZE,
                .statesize      =       sizeof(struct sha1_hash_ctx),
                .base           =       {
                        .cra_name        = "__sha1-mb",
                        .cra_driver_name = "__intel_sha1-mb",
                        .cra_priority    = 100,
                        /*
                         * Use the ASYNC flag, as some buffers in the
                         * multi-buffer algorithm may not have completed
                         * before the hashing thread sleeps.
                         */
                        .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_INTERNAL,
                        .cra_blocksize  = SHA1_BLOCK_SIZE,
                        .cra_module     = THIS_MODULE,
                        .cra_list       = LIST_HEAD_INIT
                                        (sha1_mb_areq_alg.halg.base.cra_list),
                        .cra_init       = sha1_mb_areq_init_tfm,
                        .cra_exit       = sha1_mb_areq_exit_tfm,
                        .cra_ctxsize    = sizeof(struct sha1_hash_ctx),
                }
        }
};

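/*
 * The async entry points below are thin shims: copy the caller's
 * request into the reqsize area reserved for mcryptd, retarget it at
 * the internal tfm and reissue the same operation.
 */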
static int sha1_mb_async_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_export(struct ahash_request *req, void *out)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_export(mcryptd_req, out);
}

static int sha1_mb_async_import(struct ahash_request *req, const void *in)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
        struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
        struct mcryptd_hash_request_ctx *rctx;
        struct ahash_request *areq;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        rctx = ahash_request_ctx(mcryptd_req);
        areq = &rctx->areq;

        ahash_request_set_tfm(areq, child);
        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                        rctx->complete, req);

        return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha1_mb_async_alg = {
        .init           = sha1_mb_async_init,
        .update         = sha1_mb_async_update,
        .final          = sha1_mb_async_final,
        .finup          = sha1_mb_async_finup,
        .digest         = sha1_mb_async_digest,
        .export         = sha1_mb_async_export,
        .import         = sha1_mb_async_import,
        .halg = {
                .digestsize     = SHA1_DIGEST_SIZE,
                .statesize      = sizeof(struct sha1_hash_ctx),
                .base = {
                        .cra_name               = "sha1",
                        .cra_driver_name        = "sha1_mb",
                        .cra_priority           = 200,
                        .cra_flags              = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA1_BLOCK_SIZE,
                        .cra_type               = &crypto_ahash_type,
                        .cra_module             = THIS_MODULE,
                        .cra_list               = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
                        .cra_init               = sha1_mb_async_init_tfm,
                        .cra_exit               = sha1_mb_async_exit_tfm,
                        .cra_ctxsize            = sizeof(struct sha1_mb_ctx),
                        .cra_alignmask          = 0,
                },
        },
};

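/*
 * Periodic flusher, run by mcryptd: force out expired jobs (work_list
 * is FIFO, so the oldest is checked first), then re-arm the timer for
 * the earliest remaining expiry and return that time to the caller.
 */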
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
        struct mcryptd_hash_request_ctx *rctx;
        unsigned long cur_time;
        unsigned long next_flush = 0;
        struct sha1_hash_ctx *sha_ctx;


        cur_time = jiffies;

        while (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                struct mcryptd_hash_request_ctx, waiter);
                if (time_before(cur_time, rctx->tag.expire))
                        break;
                kernel_fpu_begin();
                sha_ctx = (struct sha1_hash_ctx *)
                                        sha1_ctx_mgr_flush(cstate->mgr);
                kernel_fpu_end();
                if (!sha_ctx) {
                        pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
                        break;
                }
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                sha_finish_walk(&rctx, cstate, true);
                sha_complete_job(rctx, cstate, 0);
        }

        if (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                struct mcryptd_hash_request_ctx, waiter);
                /* get the earliest pending job's flush time and re-arm */
                next_flush = rctx->tag.expire;
                mcryptd_arm_flusher(cstate, get_delay(next_flush));
        }
        return next_flush;
}

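/*
 * Only the AVX2 job manager is wired up, so require both AVX2 and BMI2;
 * the sha1_mb_mgr_*_avx2 assembler routines hash several independent
 * buffers (lanes) per pass, which is where the multi-buffer speedup
 * comes from.
 */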
static int __init sha1_mb_mod_init(void)
{

        int cpu;
        int err;
        struct mcryptd_alg_cstate *cpu_state;

        /* check for dependent cpu features */
        if (!boot_cpu_has(X86_FEATURE_AVX2) ||
            !boot_cpu_has(X86_FEATURE_BMI2))
                return -ENODEV;

        /* initialize multibuffer structures */
        sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

        sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
        sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
        sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
        sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

        if (!sha1_mb_alg_state.alg_cstate)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                cpu_state->next_flush = 0;
                cpu_state->next_seq_num = 0;
                cpu_state->flusher_engaged = false;
                INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
                cpu_state->cpu = cpu;
                cpu_state->alg_state = &sha1_mb_alg_state;
                cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
                                        GFP_KERNEL);
                if (!cpu_state->mgr)
                        goto err2;
                sha1_ctx_mgr_init(cpu_state->mgr);
                INIT_LIST_HEAD(&cpu_state->work_list);
                spin_lock_init(&cpu_state->work_lock);
        }
        sha1_mb_alg_state.flusher = &sha1_mb_flusher;

        err = crypto_register_ahash(&sha1_mb_areq_alg);
        if (err)
                goto err2;
        err = crypto_register_ahash(&sha1_mb_async_alg);
        if (err)
                goto err1;


        return 0;
err1:
        crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);
        }
        free_percpu(sha1_mb_alg_state.alg_cstate);
        return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
        int cpu;
        struct mcryptd_alg_cstate *cpu_state;

        crypto_unregister_ahash(&sha1_mb_async_alg);
        crypto_unregister_ahash(&sha1_mb_areq_alg);
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);
        }
        free_percpu(sha1_mb_alg_state.alg_cstate);
}

module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha1");