linux/arch/arm64/crypto/sha256-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
 *
 * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
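/* Allow the module to be auto-loaded when "sha224"/"sha256" is requested. */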
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

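/*
 * Core block transforms, implemented in assembly: a scalar version derived
 * from the OpenSSL implementation, and a NEON version that may only run
 * while the SIMD register file is usable. The scalar routine is exported
 * for use by other kernel code.
 */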
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
                                        unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

asmlinkage void sha256_block_neon(u32 *digest, const void *data,
                                  unsigned int num_blks);

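/*
 * Scalar code paths. These never touch the NEON register file, so they are
 * safe to use from any context and serve as the fallback when SIMD is not
 * usable.
 */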
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
                                      unsigned int len)
{
        return sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
                                     unsigned int len, u8 *out)
{
        if (len)
                sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
        sha256_base_do_finalize(desc,
                                (sha256_block_fn *)sha256_block_data_order);

        return sha256_base_finish(desc, out);
}

static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out)
{
        return crypto_sha256_arm64_finup(desc, NULL, 0, out);
}

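/*
 * The scalar algorithms below are always registered. Their priority of 125
 * places them above the generic C implementation, while leaving room for
 * the NEON variants (priority 150) to take precedence when available.
 */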
static struct shash_alg algs[] = { {
        .digestsize             = SHA256_DIGEST_SIZE,
        .init                   = sha256_base_init,
        .update                 = crypto_sha256_arm64_update,
        .final                  = crypto_sha256_arm64_final,
        .finup                  = crypto_sha256_arm64_finup,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha256",
        .base.cra_driver_name   = "sha256-arm64",
        .base.cra_priority      = 125,
        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
}, {
        .digestsize             = SHA224_DIGEST_SIZE,
        .init                   = sha224_base_init,
        .update                 = crypto_sha256_arm64_update,
        .final                  = crypto_sha256_arm64_final,
        .finup                  = crypto_sha256_arm64_finup,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha224",
        .base.cra_driver_name   = "sha224-arm64",
        .base.cra_priority      = 125,
        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
} };

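/*
 * NEON update path: fall back to the scalar transform when the SIMD unit
 * may not be used (e.g. in hard interrupt context). Otherwise, process the
 * input under kernel_neon_begin()/kernel_neon_end(), chunking it on
 * preemptible kernels as explained below.
 */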
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
                              unsigned int len)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);

        while (len > 0) {
                unsigned int chunk = len;

                /*
                 * Don't hog the CPU for the entire time it takes to process all
                 * input when running on a preemptible kernel, but process the
                 * data block by block instead.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT) &&
                    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                        chunk = SHA256_BLOCK_SIZE -
                                sctx->count % SHA256_BLOCK_SIZE;

                kernel_neon_begin();
                sha256_base_do_update(desc, data, chunk,
                                      (sha256_block_fn *)sha256_block_neon);
                kernel_neon_end();
                data += chunk;
                len -= chunk;
        }
        return 0;
}

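/*
 * Finalization mirrors the update path: the scalar transform is used when
 * SIMD is not usable, the NEON transform otherwise.
 */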
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
                             unsigned int len, u8 *out)
{
        if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
                sha256_base_do_finalize(desc,
                                (sha256_block_fn *)sha256_block_data_order);
        } else {
                if (len)
                        sha256_update_neon(desc, data, len);
                kernel_neon_begin();
                sha256_base_do_finalize(desc,
                                (sha256_block_fn *)sha256_block_neon);
                kernel_neon_end();
        }
        return sha256_base_finish(desc, out);
}

static int sha256_final_neon(struct shash_desc *desc, u8 *out)
{
        return sha256_finup_neon(desc, NULL, 0, out);
}

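/*
 * Same algorithms as above, but backed by the NEON block transform. The
 * higher priority (150 vs 125) makes these the preferred implementation
 * whenever they are registered.
 */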
static struct shash_alg neon_algs[] = { {
        .digestsize             = SHA256_DIGEST_SIZE,
        .init                   = sha256_base_init,
        .update                 = sha256_update_neon,
        .final                  = sha256_final_neon,
        .finup                  = sha256_finup_neon,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha256",
        .base.cra_driver_name   = "sha256-arm64-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
}, {
        .digestsize             = SHA224_DIGEST_SIZE,
        .init                   = sha224_base_init,
        .update                 = sha256_update_neon,
        .final                  = sha256_final_neon,
        .finup                  = sha256_finup_neon,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha224",
        .base.cra_driver_name   = "sha224-arm64-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
} };

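/*
 * The scalar algorithms are registered unconditionally. The NEON variants
 * are only registered when the CPU advertises ASIMD; if that registration
 * fails, the scalar registration is rolled back as well.
 */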
static int __init sha256_mod_init(void)
{
        int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
        if (ret)
                return ret;

        if (cpu_have_named_feature(ASIMD)) {
                ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
                if (ret)
                        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
        }
        return ret;
}

static void __exit sha256_mod_fini(void)
{
        if (cpu_have_named_feature(ASIMD))
                crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha256_mod_init);
module_exit(sha256_mod_fini);