linux/arch/x86/crypto/aesni-intel_glue.c
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#define AESNI_ALIGN     16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
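
/*
 * Worked example (illustrative, not taken from this file): with
 * CRYPTO_MINALIGN == 8, AESNI_ALIGN_EXTRA is (16 - 1) & ~(8 - 1) ==
 * 15 & ~7 == 8 -- just enough slack beyond the API's minimum alignment
 * for aes_ctx() below to round the context pointer up to a 16-byte
 * boundary without overrunning the allocation.
 */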

/*
 * This data is stored at the end of the crypto_tfm struct.
 * It serves as per-"session" data storage and therefore
 * needs to be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
        u8 nonce[4];
};

struct generic_gcmaes_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
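
/*
 * Byte-length thresholds below which the AVX/AVX2 GCM paths are not
 * engaged by the dispatch helpers further down; shorter requests fall
 * back to the SSE routines. The exact values are tuning constants that
 * ship with the assembly, not derived here.
 */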

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES key schedule. Starts on a 16-byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input.
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 12-byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, Length of AAD in bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
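
/*
 * Illustrative call, a sketch only (buffer names are assumed): with the
 * expanded key schedule in aes_ctx, the j0 block in iv and the
 * precomputed hash subkey, a 16-byte tag is written right after the
 * ciphertext, mirroring what gcmaes_encrypt() below does:
 *
 *	aesni_gcm_enc(aes_ctx, dst, src, len, iv, hash_subkey,
 *		      assoc, assoclen, dst + len, 16);
 */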

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES key schedule. Starts on a 16-byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input.
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 12-byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is
 *         going to be 8 or 12 bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
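
/*
 * Note: unlike encryption, the tag computed during decryption goes to a
 * caller-supplied scratch buffer; the caller must compare it against
 * the tag carried at the end of the ciphertext (see the crypto_memneq()
 * check in gcmaes_decrypt() below).
 */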

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

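/*
 * Dispatch helpers: the AVX gen2 path here only handles 128-bit keys
 * and is not engaged below AVX_GEN2_OPTSIZE bytes, so short requests
 * and 192/256-bit keys fall back to the SSE aesni_gcm_enc/dec routines.
 */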
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                                aad, aad_len, auth_tag, auth_tag_len);
        } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
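
/*
 * Worked example (illustrative assumption): if crypto_tfm_ctx_alignment()
 * is 8 and the raw context sits at an address ending in 0x08, ALIGN(addr,
 * 16) moves it up to the next 0x10 boundary. The AESNI_ALIGN_EXTRA slack
 * built into CRYPTO_AES_CTX_SIZE guarantees the rounded-up pointer still
 * lies within the allocation.
 */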

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        return aes_set_key_common(crypto_skcipher_tfm(tfm),
                                  crypto_skcipher_ctx(tfm), key, len);
}

static int ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

#ifdef CONFIG_X86_64
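/*
 * CTR mode needs no padding: the final partial block is handled by
 * encrypting one more counter block and XORing only the remaining
 * nbytes of keystream into the data.
 */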
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct skcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor_cpy(dst, keystream, src, nbytes);

        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * Based on the key length, pick the by8 version of CTR mode
         * encryption/decryption for improved performance.
         * aes_set_key_common() ensures that the key length is one of
         * {128, 192, 256} bits.
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                      nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = skcipher_walk_done(&walk, 0);
        }
        kernel_fpu_end();

        return err;
}

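/*
 * XTS takes a double-length key: a 64-byte key, for example, yields two
 * AES-256 keys, one for the data and one for the tweak, which is why
 * setkey splits the input in half below.
 */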
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
                                 key, keylen);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
                                  key + keylen, keylen);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};

static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};

static int xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_enc_xts, req,
                                   XTS_TWEAK_CAST(aesni_xts_tweak),
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_dec_xts, req,
                                   XTS_TWEAK_CAST(aesni_xts_tweak),
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx));
}

static int rfc4106_init(struct crypto_aead *aead)
{
        struct cryptd_aead *cryptd_tfm;
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
                                       CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        *ctx = cryptd_tfm;
        crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_free_aead(*ctx);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, key_len);
        if (ret)
                goto out_free_cipher;

        /*
         * Clear the data in the hash sub key container to zero: we want
         * to cipher an all-zero block to create the hash sub key.
         */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
        crypto_free_cipher(tfm);
        return ret;
}

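/*
 * An RFC4106 key blob is the raw AES key with a 4-byte salt ("nonce")
 * appended; setkey peels the salt off the tail before expanding the AES
 * key and deriving the GHASH subkey.
 */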
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

        if (key_len < 4) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        /* Account for the 4-byte nonce at the end. */
        key_len -= 4;

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

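/*
 * Fast path: when src and dst are each a single scatterlist entry that
 * can be mapped in one piece, the data is processed in place; anything
 * else is copied into a temporary linear buffer first and copied back
 * afterwards.
 */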
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};

        if (sg_is_last(req->src) &&
            (!PageHighMem(sg_page(req->src)) ||
            req->src->offset + req->src->length <= PAGE_SIZE) &&
            sg_is_last(req->dst) &&
            (!PageHighMem(sg_page(req->dst)) ||
            req->dst->offset + req->dst->length <= PAGE_SIZE)) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!assoc))
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
                          hash_subkey, assoc, assoclen,
                          dst + req->cryptlen, auth_tag_len);
        kernel_fpu_end();

        /*
         * The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet.
         */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(assoc);
        }
        return 0;
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 authTag[16];
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        int retval = 0;

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

        if (sg_is_last(req->src) &&
            (!PageHighMem(sg_page(req->src)) ||
            req->src->offset + req->src->length <= PAGE_SIZE) &&
            sg_is_last(req->dst) &&
            (!PageHighMem(sg_page(req->dst)) ||
            req->dst->offset + req->dst->length <= PAGE_SIZE)) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!assoc)
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                          hash_subkey, assoc, assoclen,
                          authTag, auth_tag_len);
        kernel_fpu_end();

        /* Compare generated tag with passed in tag. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         tempCipherLen, 1);
                kfree(assoc);
        }
        return retval;
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        unsigned int i;
        __be32 counter = cpu_to_be32(1);

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, the AAD length needs to be equal to 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4-byte salt | 8-byte per-request IV | counter. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        unsigned int i;

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, the AAD length needs to be equal to 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4-byte salt | 8-byte per-request IV | counter. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_encrypt(req);
}

static int rfc4106_decrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_decrypt(req);
}
#endif

static struct crypto_alg aesni_algs[] = { {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        .cra_name               = "__aes",
        .cra_driver_name        = "__aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
} };

static struct skcipher_alg aesni_skciphers[] = {
        {
                .base = {
                        .cra_name               = "__ecb(aes)",
                        .cra_driver_name        = "__ecb-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ecb_encrypt,
                .decrypt        = ecb_decrypt,
        }, {
                .base = {
                        .cra_name               = "__cbc(aes)",
                        .cra_driver_name        = "__cbc-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = cbc_encrypt,
                .decrypt        = cbc_decrypt,
#ifdef CONFIG_X86_64
        }, {
                .base = {
                        .cra_name               = "__ctr(aes)",
                        .cra_driver_name        = "__ctr-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = 1,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .chunksize      = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ctr_crypt,
                .decrypt        = ctr_crypt,
        }, {
                .base = {
                        .cra_name               = "__xts(aes)",
                        .cra_driver_name        = "__xts-aes-aesni",
                        .cra_priority           = 401,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = XTS_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = xts_aesni_setkey,
                .encrypt        = xts_encrypt,
                .decrypt        = xts_decrypt,
#endif
        }
};

struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

struct {
        const char *algname;
        const char *drvname;
        const char *basename;
        struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
        {
                .algname        = "pcbc(aes)",
                .drvname        = "pcbc-aes-aesni",
                .basename       = "fpu(pcbc(__aes-aesni))",
        },
#endif
};

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int generic_gcmaes_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        __be32 counter = cpu_to_be32(1);

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static struct aead_alg aesni_aead_algs[] = { {
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm-aes-aesni",
                .cra_driver_name        = "__driver-gcm-aes-aesni",
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .init                   = rfc4106_init,
        .exit                   = rfc4106_exit,
        .setkey                 = rfc4106_set_key,
        .setauthsize            = rfc4106_set_authsize,
        .encrypt                = rfc4106_encrypt,
        .decrypt                = rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
}, {
        .setkey                 = generic_gcmaes_set_key,
        .setauthsize            = generic_gcmaes_set_authsize,
        .encrypt                = generic_gcmaes_encrypt,
        .decrypt                = generic_gcmaes_decrypt,
        .ivsize                 = 12,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "generic-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct generic_gcmaes_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif

static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static void aesni_free_simds(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
                    aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
                if (aesni_simd_skciphers2[i].simd)
                        simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}

static int __init aesni_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        if (err)
                goto fpu_exit;

        err = crypto_register_skciphers(aesni_skciphers,
                                        ARRAY_SIZE(aesni_skciphers));
        if (err)
                goto unregister_algs;

        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
                goto unregister_skciphers;

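        /*
         * The internal skciphers are registered with a "__" prefix; the
         * user-visible SIMD wrappers drop those two characters to derive
         * the public algorithm and driver names.
         */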
        for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
                algname = aesni_skciphers[i].base.cra_name + 2;
                drvname = aesni_skciphers[i].base.cra_driver_name + 2;
                basename = aesni_skciphers[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aesni_simd_skciphers[i] = simd;
        }

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
                algname = aesni_simd_skciphers2[i].algname;
                drvname = aesni_simd_skciphers2[i].drvname;
                basename = aesni_simd_skciphers2[i].basename;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        continue;

                aesni_simd_skciphers2[i].simd = simd;
        }

        return 0;

unregister_simds:
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
        crypto_fpu_exit();
        return err;
}

static void __exit aesni_exit(void)
{
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");