linux/drivers/crypto/ixp4xx_crypto.c
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30

#define CTL_FLAG_UNUSED         0x0000
#define CTL_FLAG_USED           0x1000
#define CTL_FLAG_PERFORM_ABLK   0x0001
#define CTL_FLAG_GEN_ICV        0x0002
#define CTL_FLAG_GEN_REVAES     0x0004
#define CTL_FLAG_PERFORM_AEAD   0x0008
#define CTL_FLAG_MASK           0x000f

#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

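/*
 * One link in the scatter chain handed to the NPE.  The leading fields
 * (phys_next, the length pair, phys_addr) are read by the NPE, which is
 * why the u16 pair is swapped on little-endian builds; 'next' and 'dir'
 * are host-only bookkeeping used to walk and unmap the chain later.
 */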
struct buffer_desc {
        u32 phys_next;
#ifdef __ARMEB__
        u16 buf_len;
        u16 pkt_len;
#else
        u16 pkt_len;
        u16 buf_len;
#endif
        u32 phys_addr;
        u32 __reserved[4];
        struct buffer_desc *next;
        enum dma_data_direction dir;
};

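/*
 * The 64-byte descriptor exchanged with the NPE through the queue manager
 * (setup_crypt_desc() asserts the size).  Everything up to and including
 * crypto_ctx is interpreted by the NPE firmware; the fields from ctl_flags
 * onwards are used by the host only.
 */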
struct crypt_ctl {
#ifdef __ARMEB__
        u8 mode;                /* NPE_OP_*  operation mode */
        u8 init_len;
        u16 reserved;
#else
        u16 reserved;
        u8 init_len;
        u8 mode;                /* NPE_OP_*  operation mode */
#endif
        u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
        u32 icv_rev_aes;        /* icv or rev aes */
        u32 src_buf;
        u32 dst_buf;
#ifdef __ARMEB__
        u16 auth_offs;          /* Authentication start offset */
        u16 auth_len;           /* Authentication data length */
        u16 crypt_offs;         /* Cryption start offset */
        u16 crypt_len;          /* Cryption data length */
#else
        u16 auth_len;           /* Authentication data length */
        u16 auth_offs;          /* Authentication start offset */
        u16 crypt_len;          /* Cryption data length */
        u16 crypt_offs;         /* Cryption start offset */
#endif
        u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
        u32 crypto_ctx;         /* NPE Crypto Param structure address */

        /* Used by Host: 4*4 bytes */
        unsigned ctl_flags;
        union {
                struct ablkcipher_request *ablk_req;
                struct aead_request *aead_req;
                struct crypto_tfm *tfm;
        } data;
        struct buffer_desc *regist_buf;
        u8 *regist_ptr;
};

struct ablk_ctx {
        struct buffer_desc *src;
        struct buffer_desc *dst;
};

struct aead_ctx {
        struct buffer_desc *src;
        struct buffer_desc *dst;
        struct scatterlist ivlist;
        /* used when the hmac is not on one sg entry */
        u8 *hmac_virt;
        int encrypt;
};

struct ix_hash_algo {
        u32 cfgword;
        unsigned char *icv;
};

struct ix_sa_dir {
        unsigned char *npe_ctx;
        dma_addr_t npe_ctx_phys;
        int npe_ctx_idx;
        u8 npe_mode;
};

struct ixp_ctx {
        struct ix_sa_dir encrypt;
        struct ix_sa_dir decrypt;
        int authkey_len;
        u8 authkey[MAX_KEYLEN];
        int enckey_len;
        u8 enckey[MAX_KEYLEN];
        u8 salt[MAX_IVLEN];
        u8 nonce[CTR_RFC3686_NONCE_SIZE];
        unsigned salted;
        atomic_t configuring;
        struct completion completion;
};

struct ixp_alg {
        struct crypto_alg crypto;
        const struct ix_hash_algo *hash;
        u32 cfg_enc;
        u32 cfg_dec;

        int registered;
};

struct ixp_aead_alg {
        struct aead_alg crypto;
        const struct ix_hash_algo *hash;
        u32 cfg_enc;
        u32 cfg_dec;

        int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
        .cfgword        = 0xAA010004,
        .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
                          "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
        .cfgword        = 0x00000005,
        .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
                          "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

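/*
 * The descriptor ring is a single DMA-coherent allocation, so a
 * descriptor's bus address and its kernel virtual address differ by a
 * constant offset.  These helpers convert between the pointer used by the
 * host and the address exchanged with the queue manager.
 */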
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
        return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
        return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
        struct device *dev = &pdev->dev;
        BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
        /* get_crypt_desc_emerg() hands out descriptors
         * NPE_QLEN..NPE_QLEN_TOTAL-1, so the whole ring must be allocated
         * here, matching the NPE_QLEN_TOTAL-sized free in
         * release_ixp_crypto(). */
        crypt_virt = dma_alloc_coherent(dev,
                        NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
                        &crypt_phys, GFP_ATOMIC);
        if (!crypt_virt)
                return -ENOMEM;
        memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
        return 0;
}

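/*
 * Descriptor allocation is a cursor over the ring: a slot whose ctl_flags
 * is CTL_FLAG_UNUSED may be claimed under desc_lock.  The ring is set up
 * lazily on the first request, which can happen in atomic context; hence
 * the GFP_ATOMIC allocation above.
 */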
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
        int i;
        static int idx = 0;
        unsigned long flags;

        spin_lock_irqsave(&desc_lock, flags);

        if (unlikely(!crypt_virt))
                setup_crypt_desc();
        if (unlikely(!crypt_virt)) {
                spin_unlock_irqrestore(&desc_lock, flags);
                return NULL;
        }
        i = idx;
        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
                if (++idx >= NPE_QLEN)
                        idx = 0;
                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
                spin_unlock_irqrestore(&desc_lock, flags);
                return crypt_virt + i;
        } else {
                spin_unlock_irqrestore(&desc_lock, flags);
                return NULL;
        }
}

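/*
 * Slots NPE_QLEN..NPE_QLEN_TOTAL-1 are reserved for configuration jobs
 * (HMAC pad hashing, reverse AES key generation), so a setkey can always
 * make progress even when all regular descriptors are held by in-flight
 * requests.
 */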
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
        int i;
        static int idx = NPE_QLEN;
        struct crypt_ctl *desc;
        unsigned long flags;

        desc = get_crypt_desc();
        if (desc)
                return desc;
        if (unlikely(!crypt_virt))
                return NULL;

        spin_lock_irqsave(&emerg_lock, flags);
        i = idx;
        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
                if (++idx >= NPE_QLEN_TOTAL)
                        idx = NPE_QLEN;
                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
                spin_unlock_irqrestore(&emerg_lock, flags);
                return crypt_virt + i;
        } else {
                spin_unlock_irqrestore(&emerg_lock, flags);
                return NULL;
        }
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
                           dma_addr_t phys)
{
        while (buf) {
                struct buffer_desc *buf1;
                u32 phys1;

                buf1 = buf->next;
                phys1 = buf->phys_next;
                /* unmap the data buffer this descriptor points at, not the
                 * pool address of the next descriptor */
                dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
                dma_pool_free(buffer_pool, buf, phys);
                buf = buf1;
                phys = phys1;
        }
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
        struct aead_request *req = crypt->data.aead_req;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        int decryptlen = req->assoclen + req->cryptlen - authsize;

        if (req_ctx->encrypt) {
                scatterwalk_map_and_copy(req_ctx->hmac_virt,
                        req->dst, decryptlen, authsize, 1);
        }
        dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
        struct device *dev = &pdev->dev;
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx;
        int failed;

        failed = phys & 0x1 ? -EBADMSG : 0;
        phys &= ~0x3;
        crypt = crypt_phys2virt(phys);

        switch (crypt->ctl_flags & CTL_FLAG_MASK) {
        case CTL_FLAG_PERFORM_AEAD: {
                struct aead_request *req = crypt->data.aead_req;
                struct aead_ctx *req_ctx = aead_request_ctx(req);

                free_buf_chain(dev, req_ctx->src, crypt->src_buf);
                free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
                if (req_ctx->hmac_virt) {
                        finish_scattered_hmac(crypt);
                }
                req->base.complete(&req->base, failed);
                break;
        }
        case CTL_FLAG_PERFORM_ABLK: {
                struct ablkcipher_request *req = crypt->data.ablk_req;
                struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

                if (req_ctx->dst) {
                        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
                }
                free_buf_chain(dev, req_ctx->src, crypt->src_buf);
                req->base.complete(&req->base, failed);
                break;
        }
        case CTL_FLAG_GEN_ICV:
                ctx = crypto_tfm_ctx(crypt->data.tfm);
                dma_pool_free(ctx_pool, crypt->regist_ptr,
                                crypt->regist_buf->phys_addr);
                dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
                if (atomic_dec_and_test(&ctx->configuring))
                        complete(&ctx->completion);
                break;
        case CTL_FLAG_GEN_REVAES:
                ctx = crypto_tfm_ctx(crypt->data.tfm);
                *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
                if (atomic_dec_and_test(&ctx->configuring))
                        complete(&ctx->completion);
                break;
        default:
                BUG();
        }
        crypt->ctl_flags = CTL_FLAG_UNUSED;
}

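/*
 * Completion path: the queue manager raises an interrupt when RECV_QID
 * becomes non-empty, the hard irq only schedules the tasklet, and the
 * tasklet drains at most four descriptors per run before rescheduling
 * itself, so a busy queue cannot monopolize the CPU.
 */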
static void irqhandler(void *_unused)
{
        tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
        int i;

        for (i = 0; i < 4; i++) {
                dma_addr_t phys = qmgr_get_entry(RECV_QID);
                if (!phys)
                        return;
                one_packet(phys);
        }
        tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
        int ret = -ENODEV;
        u32 msg[2] = { 0, 0 };

        if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
                                IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
                printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
                return ret;
        }
        npe_c = npe_request(NPE_ID);
        if (!npe_c)
                return ret;

        if (!npe_running(npe_c)) {
                ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
                if (ret)
                        goto err;       /* don't leak the NPE reference */
                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;
        } else {
                if (npe_send_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;

                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;
        }

        switch ((msg[1] >> 16) & 0xff) {
        case 3:
                printk(KERN_WARNING "Firmware of %s lacks AES support\n",
                                npe_name(npe_c));
                support_aes = 0;
                break;
        case 4:
        case 5:
                support_aes = 1;
                break;
        default:
                printk(KERN_ERR "Firmware of %s lacks crypto support\n",
                        npe_name(npe_c));
                ret = -ENODEV;
                goto err;       /* release the NPE on this path too */
        }
        /* buffer_pool will also be used to sometimes store the hmac,
         * so assure it is large enough
         */
        BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
        buffer_pool = dma_pool_create("buffer", dev,
                        sizeof(struct buffer_desc), 32, 0);
        ret = -ENOMEM;
        if (!buffer_pool) {
                goto err;
        }
        ctx_pool = dma_pool_create("context", dev,
                        NPE_CTX_LEN, 16, 0);
        if (!ctx_pool) {
                goto err;
        }
        ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
                                 "ixp_crypto:out", NULL);
        if (ret)
                goto err;
        ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
                                 "ixp_crypto:in", NULL);
        if (ret) {
                qmgr_release_queue(SEND_QID);
                goto err;
        }
        qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
        tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

        qmgr_enable_irq(RECV_QID);
        return 0;

npe_error:
        printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
        ret = -EIO;
err:
        dma_pool_destroy(ctx_pool);
        dma_pool_destroy(buffer_pool);
        npe_release(npe_c);
        return ret;
}

static void release_ixp_crypto(struct device *dev)
{
        qmgr_disable_irq(RECV_QID);
        tasklet_kill(&crypto_done_tasklet);

        qmgr_release_queue(SEND_QID);
        qmgr_release_queue(RECV_QID);

        dma_pool_destroy(ctx_pool);
        dma_pool_destroy(buffer_pool);

        npe_release(npe_c);

        if (crypt_virt) {
                dma_free_coherent(dev,
                        NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
                        crypt_virt, crypt_phys);
        }
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
        dir->npe_ctx_idx = 0;
        dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
        dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
        if (!dir->npe_ctx) {
                return -ENOMEM;
        }
        reset_sa_dir(dir);
        return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
        dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        atomic_set(&ctx->configuring, 0);
        ret = init_sa_dir(&ctx->encrypt);
        if (ret)
                return ret;
        ret = init_sa_dir(&ctx->decrypt);
        if (ret) {
                free_sa_dir(&ctx->encrypt);
        }
        return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
        return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
        return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        free_sa_dir(&ctx->encrypt);
        free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
        exit_tfm(crypto_aead_tfm(tfm));
}

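/*
 * HMAC setup offloads the ipad/opad precomputation to the NPE: the key,
 * padded and XORed with the pad byte on the host, is hashed by the NPE and
 * the resulting chaining variable is written back at 'target' inside the
 * direction's context.  The GEN_ICV completion in one_packet() frees the
 * temporary pad and buffer descriptor.
 */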
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
                int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypt_ctl *crypt;
        struct buffer_desc *buf;
        int i;
        u8 *pad;
        dma_addr_t pad_phys, buf_phys;

        BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
        pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
        if (!pad)
                return -ENOMEM;
        buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
        if (!buf) {
                dma_pool_free(ctx_pool, pad, pad_phys);
                return -ENOMEM;
        }
        crypt = get_crypt_desc_emerg();
        if (!crypt) {
                dma_pool_free(ctx_pool, pad, pad_phys);
                dma_pool_free(buffer_pool, buf, buf_phys);
                return -EAGAIN;
        }

        memcpy(pad, key, key_len);
        memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
        for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
                pad[i] ^= xpad;
        }

        crypt->data.tfm = tfm;
        crypt->regist_ptr = pad;
        crypt->regist_buf = buf;

        crypt->auth_offs = 0;
        crypt->auth_len = HMAC_PAD_BLOCKLEN;
        crypt->crypto_ctx = ctx_addr;
        crypt->src_buf = buf_phys;
        crypt->icv_rev_aes = target;
        crypt->mode = NPE_OP_HASH_GEN_ICV;
        crypt->init_len = init_len;
        crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

        buf->next = NULL;
        buf->buf_len = HMAC_PAD_BLOCKLEN;
        buf->pkt_len = 0;
        buf->phys_addr = pad_phys;

        atomic_inc(&ctx->configuring);
        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(SEND_QID));
        return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
                const u8 *key, int key_len, unsigned digest_len)
{
        u32 itarget, otarget, npe_ctx_addr;
        unsigned char *cinfo;
        int init_len, ret = 0;
        u32 cfgword;
        struct ix_sa_dir *dir;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        const struct ix_hash_algo *algo;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        cinfo = dir->npe_ctx + dir->npe_ctx_idx;
        algo = ix_hash(tfm);

        /* write cfg word to cryptinfo */
        cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
        cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
        *(u32 *)cinfo = cpu_to_be32(cfgword);
        cinfo += sizeof(cfgword);

        /* write ICV to cryptinfo */
        memcpy(cinfo, algo->icv, digest_len);
        cinfo += digest_len;

        itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
                                + sizeof(algo->cfgword);
        otarget = itarget + digest_len;
        init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
        npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

        dir->npe_ctx_idx += init_len;
        dir->npe_mode |= NPE_OP_HASH_ENABLE;

        if (!encrypt)
                dir->npe_mode |= NPE_OP_HASH_VERIFY;

        ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
                        init_len, npe_ctx_addr, key, key_len);
        if (ret)
                return ret;
        return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
                        init_len, npe_ctx_addr, key, key_len);
}

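/*
 * AES decryption needs the reverse (decryption) key schedule.  Instead of
 * expanding the key on the host, a one-block dummy encryption is queued
 * with NPE_OP_ENC_GEN_KEY so the firmware writes the reverse key right
 * after the cfgword of the decrypt context; the GEN_REVAES completion then
 * clears the temporary CIPH_ENCR bit again.
 */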
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ix_sa_dir *dir = &ctx->decrypt;

        crypt = get_crypt_desc_emerg();
        if (!crypt) {
                return -EAGAIN;
        }
        *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

        crypt->data.tfm = tfm;
        crypt->crypt_offs = 0;
        crypt->crypt_len = AES_BLOCK128;
        crypt->src_buf = 0;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
        crypt->mode = NPE_OP_ENC_GEN_KEY;
        crypt->init_len = dir->npe_ctx_idx;
        crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

        atomic_inc(&ctx->configuring);
        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(SEND_QID));
        return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
                const u8 *key, int key_len)
{
        u8 *cinfo;
        u32 cipher_cfg;
        u32 keylen_cfg = 0;
        struct ix_sa_dir *dir;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        cinfo = dir->npe_ctx;

        if (encrypt) {
                cipher_cfg = cipher_cfg_enc(tfm);
                dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
        } else {
                cipher_cfg = cipher_cfg_dec(tfm);
        }
        if (cipher_cfg & MOD_AES) {
                switch (key_len) {
                case 16: keylen_cfg = MOD_AES128; break;
                case 24: keylen_cfg = MOD_AES192; break;
                case 32: keylen_cfg = MOD_AES256; break;
                default:
                        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                        return -EINVAL;
                }
                cipher_cfg |= keylen_cfg;
        } else if (cipher_cfg & MOD_3DES) {
                const u32 *K = (const u32 *)key;
                /* reject degenerate 3DES keys with K1 == K2 or K2 == K3 */
                if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
                             !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
                {
                        *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
                        return -EINVAL;
                }
        } else {
                u32 tmp[DES_EXPKEY_WORDS];
                if (des_ekey(tmp, key) == 0) {
                        *flags |= CRYPTO_TFM_RES_WEAK_KEY;
                }
        }
        /* write cfg word to cryptinfo */
        *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
        cinfo += sizeof(cipher_cfg);

        /* write cipher key to cryptinfo */
        memcpy(cinfo, key, key_len);
        /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
        if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
                memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
                key_len = DES3_EDE_KEY_SIZE;
        }
        dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
        dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
        if ((cipher_cfg & MOD_AES) && !encrypt) {
                return gen_rev_aes_key(tfm);
        }
        return 0;
}

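/*
 * chainup_buffers() turns a scatterlist into the NPE's linked buffer
 * format.  The caller passes an on-stack "hook" descriptor as chain head;
 * only its next/phys_next fields are filled in, so the real chain starts
 * at hook.next and can later be handed to free_buf_chain().  On allocation
 * failure the partial chain is terminated and NULL is returned, leaving
 * the caller able to free what was built so far.
 */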
static struct buffer_desc *chainup_buffers(struct device *dev,
                struct scatterlist *sg, unsigned nbytes,
                struct buffer_desc *buf, gfp_t flags,
                enum dma_data_direction dir)
{
        for (; nbytes > 0; sg = sg_next(sg)) {
                unsigned len = min(nbytes, sg->length);
                struct buffer_desc *next_buf;
                dma_addr_t next_buf_phys;
                void *ptr;

                nbytes -= len;
                ptr = page_address(sg_page(sg)) + sg->offset;
                next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
                if (!next_buf)
                        goto fail;
                sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
                buf->next = next_buf;
                buf->phys_next = next_buf_phys;
                buf = next_buf;

                buf->phys_addr = sg_dma_address(sg);
                buf->buf_len = len;
                buf->dir = dir;
        }
        buf->next = NULL;
        buf->phys_next = 0;
        return buf;

fail:
        /* terminate the partial chain instead of dereferencing NULL */
        buf->next = NULL;
        buf->phys_next = 0;
        return NULL;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int key_len)
{
        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        u32 *flags = &tfm->base.crt_flags;
        int ret;

        init_completion(&ctx->completion);
        atomic_inc(&ctx->configuring);

        reset_sa_dir(&ctx->encrypt);
        reset_sa_dir(&ctx->decrypt);

        ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
        ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

        ret = setup_cipher(&tfm->base, 0, key, key_len);
        if (ret)
                goto out;
        ret = setup_cipher(&tfm->base, 1, key, key_len);
        if (ret)
                goto out;

        if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
                if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
                        ret = -EINVAL;
                } else {
                        *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
                }
        }
out:
        if (!atomic_dec_and_test(&ctx->configuring))
                wait_for_completion(&ctx->completion);
        return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                unsigned int key_len)
{
        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        /* the nonce is stored in bytes at end of key */
        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;

        memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
                        CTR_RFC3686_NONCE_SIZE);

        key_len -= CTR_RFC3686_NONCE_SIZE;
        return ablk_setkey(tfm, key, key_len);
}

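/*
 * Request submission is non-blocking: if the send queue is full or a key
 * change is still being processed by the NPE, the request is bounced with
 * -EAGAIN.  A successful submission returns -EINPROGRESS and the request
 * completes from the tasklet via one_packet().
 */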
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
        struct ix_sa_dir *dir;
        struct crypt_ctl *crypt;
        unsigned int nbytes = req->nbytes;
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct buffer_desc src_hook, *src_buf;
        struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;

        if (qmgr_stat_full(SEND_QID))
                return -EAGAIN;
        if (atomic_read(&ctx->configuring))
                return -EAGAIN;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

        crypt = get_crypt_desc();
        if (!crypt)
                return -ENOMEM;

        crypt->data.ablk_req = req;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->mode = dir->npe_mode;
        crypt->init_len = dir->npe_ctx_idx;

        crypt->crypt_offs = 0;
        crypt->crypt_len = nbytes;

        BUG_ON(ivsize && !req->info);
        memcpy(crypt->iv, req->info, ivsize);
        if (req->src != req->dst) {
                struct buffer_desc dst_hook, *dst_buf;

                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                /* This was never tested by Intel
                 * for more than one dst buffer, I think. */
                dst_buf = chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                                          flags, DMA_FROM_DEVICE);
                /* record the (possibly partial) chain before checking for
                 * failure, so the error path can free it */
                req_ctx->dst = dst_hook.next;
                crypt->dst_buf = dst_hook.phys_next;
                if (!dst_buf)
                        goto free_buf_dest;
                src_direction = DMA_TO_DEVICE;
        } else {
                req_ctx->dst = NULL;
        }
        src_buf = chainup_buffers(dev, req->src, nbytes, &src_hook,
                                  flags, src_direction);
        req_ctx->src = src_hook.next;
        crypt->src_buf = src_hook.phys_next;
        if (!src_buf)
                goto free_buf_src;

        crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(SEND_QID));
        return -EINPROGRESS;

free_buf_src:
        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
        if (req->src != req->dst) {
                free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
        }
        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
        return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
        return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        u8 iv[CTR_RFC3686_BLOCK_SIZE];
        u8 *info = req->info;
        int ret;

        /* set up counter block */
        memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
        memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

        /* initialize counter portion of counter block */
        *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
                cpu_to_be32(1);

        req->info = iv;
        ret = ablk_perform(req, 1);
        req->info = info;
        return ret;
}

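/*
 * AEAD processing authenticates assoclen + cryptlen bytes and en/decrypts
 * the payload in the same pass.  The NPE reads or writes the ICV at
 * icv_rev_aes; when the ICV is not contiguous in the last buffer of the
 * chain, it is bounced through a buffer_pool entry and copied by
 * finish_scattered_hmac() on completion.
 */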
static int aead_perform(struct aead_request *req, int encrypt,
                int cryptoffset, int eff_cryptlen, u8 *iv)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned ivsize = crypto_aead_ivsize(tfm);
        unsigned authsize = crypto_aead_authsize(tfm);
        struct ix_sa_dir *dir;
        struct crypt_ctl *crypt;
        unsigned int cryptlen;
        struct buffer_desc *buf, src_hook;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
        struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        unsigned int lastlen;

        if (qmgr_stat_full(SEND_QID))
                return -EAGAIN;
        if (atomic_read(&ctx->configuring))
                return -EAGAIN;

        if (encrypt) {
                dir = &ctx->encrypt;
                cryptlen = req->cryptlen;
        } else {
                dir = &ctx->decrypt;
                /* req->cryptlen includes the authsize when decrypting */
                cryptlen = req->cryptlen - authsize;
                eff_cryptlen -= authsize;
        }
        crypt = get_crypt_desc();
        if (!crypt)
                return -ENOMEM;

        crypt->data.aead_req = req;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->mode = dir->npe_mode;
        crypt->init_len = dir->npe_ctx_idx;

        crypt->crypt_offs = cryptoffset;
        crypt->crypt_len = eff_cryptlen;

        crypt->auth_offs = 0;
        crypt->auth_len = req->assoclen + cryptlen;
        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);

        buf = chainup_buffers(dev, req->src, crypt->auth_len,
                              &src_hook, flags, src_direction);
        req_ctx->src = src_hook.next;
        crypt->src_buf = src_hook.phys_next;
        req_ctx->dst = NULL;
        if (!buf)
                goto free_buf_src;

        lastlen = buf->buf_len;
        if (lastlen >= authsize)
                crypt->icv_rev_aes = buf->phys_addr +
                                     buf->buf_len - authsize;

        if (req->src != req->dst) {
                struct buffer_desc dst_hook;

                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                src_direction = DMA_TO_DEVICE;

                buf = chainup_buffers(dev, req->dst, crypt->auth_len,
                                      &dst_hook, flags, DMA_FROM_DEVICE);
                req_ctx->dst = dst_hook.next;
                crypt->dst_buf = dst_hook.phys_next;

                if (!buf)
                        goto free_buf_dst;

                if (encrypt) {
                        lastlen = buf->buf_len;
                        if (lastlen >= authsize)
                                crypt->icv_rev_aes = buf->phys_addr +
                                                     buf->buf_len - authsize;
                }
        }

        if (unlikely(lastlen < authsize)) {
                /* The ICV is scattered across sg entries,
                 * so bounce it through a linear buffer */
                req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
                                &crypt->icv_rev_aes);
                if (unlikely(!req_ctx->hmac_virt))
                        goto free_buf_dst;      /* frees dst, then src */
                if (!encrypt) {
                        scatterwalk_map_and_copy(req_ctx->hmac_virt,
                                req->src, cryptlen, authsize, 0);
                }
                req_ctx->encrypt = encrypt;
        } else {
                req_ctx->hmac_virt = NULL;
        }

        crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(SEND_QID));
        return -EINPROGRESS;

/* Note the label order: a dst failure frees both chains, while a src
 * failure (before req_ctx->dst is valid) frees only the src chain. */
free_buf_dst:
        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        u32 *flags = &tfm->base.crt_flags;
        unsigned digest_len = crypto_aead_maxauthsize(tfm);
        int ret;

        if (!ctx->enckey_len && !ctx->authkey_len)
                return 0;
        init_completion(&ctx->completion);
        atomic_inc(&ctx->configuring);

        reset_sa_dir(&ctx->encrypt);
        reset_sa_dir(&ctx->decrypt);

        ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
        if (ret)
                goto out;
        ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
        if (ret)
                goto out;
        ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
                        ctx->authkey_len, digest_len);
        if (ret)
                goto out;
        ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
                        ctx->authkey_len, digest_len);
        if (ret)
                goto out;

        if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
                if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
                        ret = -EINVAL;
                        goto out;
                } else {
                        *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
                }
        }
out:
        if (!atomic_dec_and_test(&ctx->configuring))
                wait_for_completion(&ctx->completion);
        return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        int max = crypto_aead_maxauthsize(tfm) >> 2;

        /* authsize must be a non-zero multiple of 4, up to the digest size */
        if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
                return -EINVAL;
        return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        if (keys.authkeylen > sizeof(ctx->authkey))
                goto badkey;

        if (keys.enckeylen > sizeof(ctx->enckey))
                goto badkey;

        memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
        memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
        ctx->authkey_len = keys.authkeylen;
        ctx->enckey_len = keys.enckeylen;

        return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
        return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
        return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
        .crypto = {
                .cra_name       = "cbc(des)",
                .cra_blocksize  = DES_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
                        .ivsize         = DES_BLOCK_SIZE,
                        .geniv          = "eseqiv",
                        }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
        .crypto = {
                .cra_name       = "ecb(des)",
                .cra_blocksize  = DES_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
                        }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
        .crypto = {
                .cra_name       = "cbc(des3_ede)",
                .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
                        .ivsize         = DES3_EDE_BLOCK_SIZE,
                        .geniv          = "eseqiv",
                        }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .cra_name       = "ecb(des3_ede)",
                .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
                        }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
        .crypto = {
                .cra_name       = "cbc(aes)",
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .geniv          = "eseqiv",
                        }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
        .crypto = {
                .cra_name       = "ecb(aes)",
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
        .crypto = {
                .cra_name       = "ctr(aes)",
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .geniv          = "eseqiv",
                        }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
        /* CTR decryption is the same keystream operation as encryption */
        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
        .crypto = {
                .cra_name       = "rfc3686(ctr(aes))",
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_u          = { .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .geniv          = "eseqiv",
                        .setkey         = ablk_rfc3686_setkey,
                        .encrypt        = ablk_rfc3686_crypt,
                        .decrypt        = ablk_rfc3686_crypt }
                }
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(md5),cbc(des))",
                        .cra_blocksize  = DES_BLOCK_SIZE,
                },
                .ivsize         = DES_BLOCK_SIZE,
                .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
                        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
                },
                .ivsize         = DES3_EDE_BLOCK_SIZE,
                .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(sha1),cbc(des))",
                        .cra_blocksize  = DES_BLOCK_SIZE,
                },
                .ivsize         = DES_BLOCK_SIZE,
                .maxauthsize    = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
                        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
                },
                .ivsize         = DES3_EDE_BLOCK_SIZE,
                .maxauthsize    = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(md5),cbc(aes))",
                        .cra_blocksize  = AES_BLOCK_SIZE,
                },
                .ivsize         = AES_BLOCK_SIZE,
                .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(sha1),cbc(aes))",
                        .cra_blocksize  = AES_BLOCK_SIZE,
                },
                .ivsize         = AES_BLOCK_SIZE,
                .maxauthsize    = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
        .name           = DRIVER_NAME,
        .id             = 0,
        .dma_mask       = DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
        int num = ARRAY_SIZE(ixp4xx_algos);
        int i, err;

        pdev = platform_device_register_full(&ixp_dev_info);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        spin_lock_init(&desc_lock);
        spin_lock_init(&emerg_lock);

        err = init_ixp_crypto(&pdev->dev);
        if (err) {
                platform_device_unregister(pdev);
                return err;
        }
        for (i = 0; i < num; i++) {
                struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

                if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
                        "%s"IXP_POSTFIX, cra->cra_name) >=
                        CRYPTO_MAX_ALG_NAME)
                {
                        continue;
                }
                if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
                        continue;
                }

                /* block ciphers */
                cra->cra_type = &crypto_ablkcipher_type;
                cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
                                 CRYPTO_ALG_ASYNC;
                if (!cra->cra_ablkcipher.setkey)
                        cra->cra_ablkcipher.setkey = ablk_setkey;
                if (!cra->cra_ablkcipher.encrypt)
                        cra->cra_ablkcipher.encrypt = ablk_encrypt;
                if (!cra->cra_ablkcipher.decrypt)
                        cra->cra_ablkcipher.decrypt = ablk_decrypt;
                cra->cra_init = init_tfm_ablk;

                cra->cra_ctxsize = sizeof(struct ixp_ctx);
                cra->cra_module = THIS_MODULE;
                cra->cra_alignmask = 3;
                cra->cra_priority = 300;
                cra->cra_exit = exit_tfm;
                if (crypto_register_alg(cra))
                        printk(KERN_ERR "Failed to register '%s'\n",
                                cra->cra_name);
                else
                        ixp4xx_algos[i].registered = 1;
        }

        for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
                struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

                if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s"IXP_POSTFIX, cra->base.cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        continue;
                /* check this entry's own cfg word, not ixp4xx_algos[i]'s */
                if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
                        continue;

                /* authenc */
                cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                      CRYPTO_ALG_ASYNC;
                cra->setkey = aead_setkey;
                cra->setauthsize = aead_setauthsize;
                cra->encrypt = aead_encrypt;
                cra->decrypt = aead_decrypt;
                cra->init = init_tfm_aead;
                cra->exit = exit_tfm_aead;

                cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
                cra->base.cra_module = THIS_MODULE;
                cra->base.cra_alignmask = 3;
                cra->base.cra_priority = 300;

                if (crypto_register_aead(cra))
                        printk(KERN_ERR "Failed to register '%s'\n",
                                cra->base.cra_driver_name);
                else
                        ixp4xx_aeads[i].registered = 1;
        }
        return 0;
}

static void __exit ixp_module_exit(void)
{
        int num = ARRAY_SIZE(ixp4xx_algos);
        int i;

        for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
                if (ixp4xx_aeads[i].registered)
                        crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
        }

        for (i = 0; i < num; i++) {
                if (ixp4xx_algos[i].registered)
                        crypto_unregister_alg(&ixp4xx_algos[i].crypto);
        }
        release_ixp_crypto(&pdev->dev);
        platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");