linux/drivers/crypto/padlock-aes.c

/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
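
/*
 * Worked example (illustrative only): with the defaults above,
 * ecb_fetch_bytes is 2 * 16 = 32 and cbc_fetch_bytes is 1 * 16 = 16.
 * On processors with the prefetch erratum (see padlock_init() below),
 * the block counts are raised to the MAX_*_FETCH_BLOCKS values, i.e.
 * the engine may touch up to 8 * 16 = 128 (ECB) or 4 * 16 = 64 (CBC)
 * bytes per xcrypt invocation; the bounce buffers and page-boundary
 * checks later in this file are sized against those maxima.
 */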

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
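
/*
 * Illustrative sketch (not part of the driver): how aes_set_key() below
 * fills the control word for the standard key sizes. The values follow
 * directly from the arithmetic in aes_set_key(); the field encoding
 * itself is defined by the PadLock hardware.
 *
 *	struct cword cw = { 0 };
 *	cw.rounds = 10 + (key_len - 16) / 4;	// 16 -> 10, 24 -> 12, 32 -> 14
 *	cw.ksize  = (key_len - 16) / 8;		// 16 -> 0,  24 -> 1,  32 -> 2
 *	cw.encdec = 1;				// 1 = decrypt, 0 = encrypt
 *	cw.keygen = 1;				// software-expanded key supplied
 */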

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter, but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};
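
/*
 * Note: D normally points at d_data, the software-expanded decryption
 * key. For 128-bit keys the hardware can expand the key itself, so
 * aes_set_key() points D back at E and both directions share the plain
 * key; see aes_hw_extkey_available() below.
 */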

static DEFINE_PER_CPU(struct cword *, last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
		 as it's possible that the capability will be
		 added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}
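
/*
 * Worked example (illustrative): PADLOCK_ALIGNMENT is 16. When the
 * crypto core already hands out contexts aligned to at least 16 bytes,
 * align drops to 1 and ALIGN(addr, 1) == addr, i.e. the pointer is
 * used as-is; otherwise it is rounded up, e.g. 0x1238 -> 0x1240.
 */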

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
			per_cpu(last_cword, cpu) = NULL;

	return 0;
}
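
/*
 * Worked example (illustrative) of the control-word arithmetic above:
 *
 *	key_len = 16: rounds = 10 + 0/4  = 10, ksize = 0/8  = 0 (AES-128)
 *	key_len = 24: rounds = 10 + 8/4  = 12, ksize = 8/8  = 1 (AES-192)
 *	key_len = 32: rounds = 10 + 16/4 = 14, ksize = 16/8 = 2 (AES-256)
 *
 * Only the 128-bit case takes the "ok" shortcut: the hardware expands
 * the key itself, so E holds the plain key and D aliases E.
 */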

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}
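
/*
 * The pushf/popf pair above looks like a no-op but is not: per VIA's
 * programming notes, rewriting EFLAGS forces the PadLock unit to
 * reload the key material on the next xcrypt instruction. It is only
 * needed when the control word changed since the last operation on
 * this CPU, hence the per-cpu last_cword cache.
 */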

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */

static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
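
/*
 * Register contract of the xcrypt instructions (as encoded by the
 * operand constraints above): ESI = source, EDI = destination,
 * EDX = control word, EBX = expanded key, ECX = block count, and for
 * CBC additionally EAX = IV. The opcodes are emitted via .byte, most
 * likely because older assemblers do not know the xcryptecb/xcryptcbc
 * mnemonics.
 */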

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
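
/*
 * Bounce-buffer sizing (illustrative): the copy paths above are only
 * entered with count < ecb_fetch_blocks (resp. cbc_fetch_blocks), so
 * at most MAX_*_FETCH_BLOCKS - 1 blocks are ever copied. The engine
 * may still prefetch one further block past the buffer, which is what
 * the "16 bytes of stack already in use" assumption covers, and the
 * PADLOCK_ALIGNMENT - 1 bytes of slack let PTR_ALIGN() round tmp up
 * to a 16-byte boundary.
 */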

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
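
/*
 * Page-boundary test (illustrative): (unsigned long)in & ~PAGE_MASK is
 * the offset of the input within its page. With PAGE_SIZE 4096 and
 * offset 4080, offset + ecb_fetch_bytes (32) = 4112 > 4096, so the
 * prefetch could touch the next, possibly unmapped, page; the data is
 * then bounced through the on-stack buffer instead.
 */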

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}
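
/*
 * Split logic (illustrative): ecb_fetch_blocks is a power of two, so
 * "count & (ecb_fetch_blocks - 1)" is count % ecb_fetch_blocks. With
 * count = 7 and ecb_fetch_blocks = 2, initial = 1: the odd block is
 * handled first (its overfetch lands in the rest of the request, which
 * is mapped), leaving the remaining 6 blocks as a whole multiple of
 * the fetch size so the bulk pass never prefetches past the buffer.
 */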

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count - initial));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};
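
/*
 * Usage sketch (illustrative, not part of the driver): a kernel client
 * reaches this implementation through the generic single-block cipher
 * API; "aes-padlock" wins over aes-generic via cra_priority.
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, 16);
 *		crypto_cipher_encrypt_one(tfm, dst, src);
 *		crypto_free_cipher(tfm);
 *	}
 */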

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};
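
/*
 * Usage sketch (illustrative) for the block-cipher algorithms above,
 * via the legacy blkcipher interface this driver registers with:
 *
 *	struct crypto_blkcipher *tfm =
 *		crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	struct blkcipher_desc desc = { .tfm = tfm };
 *
 *	crypto_blkcipher_setkey(tfm, key, 16);
 *	crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
 *	crypto_blkcipher_encrypt(&desc, &dst_sg, &src_sg, nbytes);
 *	crypto_free_blkcipher(tfm);
 */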

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");