linux/drivers/crypto/geode-aes.c
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <asm/io.h>
#include <asm/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

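/*
 * There is a single AES engine, so one MMIO mapping and one global
 * lock serialize all access to it.  The lock is taken with interrupts
 * disabled because the cipher entry points may be called from atomic
 * context.
 */
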
/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

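/*
 * Program one operation into the engine.  Source and destination are
 * handed to the hardware as physical addresses, so both buffers must
 * be physically contiguous.  Completion is detected by polling the
 * interrupt-pending flag for up to AES_OP_TIMEOUT iterations rather
 * than by taking an interrupt; the return value is nonzero only if
 * the wait timed out.
 */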
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
        u32 status;
        u32 counter = AES_OP_TIMEOUT;

        iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
        iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
        iowrite32(len, _iobase + AES_LENA_REG);

        /* Start the operation */
        iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

        do {
                status = ioread32(_iobase + AES_INTR_REG);
                cpu_relax();
        } while (!(status & AES_INTRA_PENDING) && --counter);

        /* Clear the event */
        iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
        return counter ? 0 : 1;
}

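/*
 * Run one operation through the engine under the global lock: load
 * the IV (CBC mode) and, unless the op uses the hardware's hidden
 * key, the key itself, kick do_crypt(), and read the updated IV back
 * so CBC chains correctly across calls.  Returns the number of bytes
 * processed.
 */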
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
        u32 flags = 0;
        unsigned long iflags;
        int ret;

        if (op->len == 0)
                return 0;

        /* The coherent flags are required whenever the source and
         * destination buffers overlap; they are harmless otherwise,
         * so set them unconditionally.
         */
        flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

        if (op->dir == AES_DIR_ENCRYPT)
                flags |= AES_CTRL_ENCRYPT;

        /* Start the critical section */

        spin_lock_irqsave(&lock, iflags);

        if (op->mode == AES_MODE_CBC) {
                flags |= AES_CTRL_CBC;
                _writefield(AES_WRITEIV0_REG, op->iv);
        }

        if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
                flags |= AES_CTRL_WRKEY;
                _writefield(AES_WRITEKEY0_REG, op->key);
        }

        ret = do_crypt(op->src, op->dst, op->len, flags);
        BUG_ON(ret);

        if (op->mode == AES_MODE_CBC)
                _readfield(AES_WRITEIV0_REG, op->iv);

        spin_unlock_irqrestore(&lock, iflags);

        return op->len;
}

/* CRYPTO-API Functions */

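/*
 * The engine itself only handles 128-bit keys.  For 192- and 256-bit
 * keys the setkey routines fall back to a software implementation:
 * the request flags are copied into the fallback tfm before setkey,
 * and any result flags are copied back so errors reach the caller
 * unchanged.
 */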
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);
        unsigned int ret;

        op->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(op->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
                /* not supported at all */
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(op->fallback.cip, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);
        unsigned int ret;

        op->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(op->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
                /* not supported at all */
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

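/*
 * The blkcipher fallbacks temporarily point desc->tfm at the software
 * implementation so the whole request (including IV handling) runs
 * there, then restore the original tfm before returning.
 */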
static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm;
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = op->fallback.blk;

        ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm;
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = op->fallback.blk;

        ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        if (unlikely(op->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_encrypt_one(op->fallback.cip, out, in);
                return;
        }

        op->src = (void *) in;
        op->dst = (void *) out;
        op->mode = AES_MODE_ECB;
        op->flags = 0;
        op->len = AES_MIN_BLOCK_SIZE;
        op->dir = AES_DIR_ENCRYPT;

        geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        if (unlikely(op->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_decrypt_one(op->fallback.cip, out, in);
                return;
        }

        op->src = (void *) in;
        op->dst = (void *) out;
        op->mode = AES_MODE_ECB;
        op->flags = 0;
        op->len = AES_MIN_BLOCK_SIZE;
        op->dir = AES_DIR_DECRYPT;

        geode_aes_crypt(op);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        op->fallback.cip = crypto_alloc_cipher(name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(op->fallback.cip)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(op->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        crypto_free_cipher(op->fallback.cip);
        op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "geode-aes",
        .cra_priority           = 300,
        .cra_alignmask          = 15,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_cip,
        .cra_exit               = fallback_exit_cip,
        .cra_blocksize          = AES_MIN_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_op),
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(geode_alg.cra_list),
        .cra_u                  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = geode_setkey_cip,
                        .cia_encrypt            = geode_encrypt,
                        .cia_decrypt            = geode_decrypt
                }
        }
};

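/*
 * The blkcipher paths walk the scatterlists in virtually mapped
 * chunks.  Each pass hands the hardware only whole 16-byte blocks;
 * any remainder is passed back to blkcipher_walk_done(), which
 * carries it into the next iteration.  For CBC the chained IV lives
 * in the hardware register and is read back by geode_aes_crypt().
 */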
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_DECRYPT;

                ret = geode_aes_crypt(op);

                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_ENCRYPT;

                ret = geode_aes_crypt(op);
                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        op->fallback.blk = crypto_alloc_blkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(op->fallback.blk)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(op->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(op->fallback.blk);
        op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-geode",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_blk,
        .cra_exit               = fallback_exit_blk,
        .cra_blocksize          = AES_MIN_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_op),
        .cra_alignmask          = 15,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = geode_setkey_blk,
                        .encrypt        = geode_cbc_encrypt,
                        .decrypt        = geode_cbc_decrypt,
                        .ivsize         = AES_IV_LENGTH,
                }
        }
};

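/*
 * The ECB paths follow the same walk pattern as the CBC ones above,
 * minus the IV handling.
 */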
static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_DECRYPT;

                ret = geode_aes_crypt(op);
                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr;
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
                op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
                op->dir = AES_DIR_ENCRYPT;

                ret = geode_aes_crypt(op);
                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static struct crypto_alg geode_ecb_alg = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-geode",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_blk,
        .cra_exit               = fallback_exit_blk,
        .cra_blocksize          = AES_MIN_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_op),
        .cra_alignmask          = 15,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = geode_setkey_blk,
                        .encrypt        = geode_ecb_encrypt,
                        .decrypt        = geode_ecb_decrypt,
                }
        }
};

static void __devexit
geode_aes_remove(struct pci_dev *dev)
{
        crypto_unregister_alg(&geode_alg);
        crypto_unregister_alg(&geode_ecb_alg);
        crypto_unregister_alg(&geode_cbc_alg);

        pci_iounmap(dev, _iobase);
        _iobase = NULL;

        pci_release_regions(dev);
        pci_disable_device(dev);
}

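/*
 * Map BAR 0 of the AES device, clear any stale interrupt state, and
 * register the cipher plus the two blkcipher algorithms, unwinding
 * in reverse order if any step fails.
 */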
static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int ret;

        if ((ret = pci_enable_device(dev)))
                return ret;

        if ((ret = pci_request_regions(dev, "geode-aes")))
                goto eenable;

        _iobase = pci_iomap(dev, 0, 0);

        if (_iobase == NULL) {
                ret = -ENOMEM;
                goto erequest;
        }

        spin_lock_init(&lock);

        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

        if ((ret = crypto_register_alg(&geode_alg)))
                goto eiomap;

        if ((ret = crypto_register_alg(&geode_ecb_alg)))
                goto ealg;

        if ((ret = crypto_register_alg(&geode_cbc_alg)))
                goto eecb;

        printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
        return 0;

 eecb:
        crypto_unregister_alg(&geode_ecb_alg);

 ealg:
        crypto_unregister_alg(&geode_alg);

 eiomap:
        pci_iounmap(dev, _iobase);

 erequest:
        pci_release_regions(dev);

 eenable:
        pci_disable_device(dev);

        printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
        return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
        { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
        .name = "Geode LX AES",
        .id_table = geode_aes_tbl,
        .probe = geode_aes_probe,
        .remove = __devexit_p(geode_aes_remove)
};

static int __init
geode_aes_init(void)
{
        return pci_register_driver(&geode_aes_driver);
}

static void __exit
geode_aes_exit(void)
{
        pci_unregister_driver(&geode_aes_driver);
}

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

module_init(geode_aes_init);
module_exit(geode_aes_exit);