linux/drivers/crypto/ixp4xx_crypto.c
   1/*
   2 * Intel IXP4xx NPE-C crypto driver
   3 *
   4 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of version 2 of the GNU General Public License
   8 * as published by the Free Software Foundation.
   9 *
  10 */
  11
  12#include <linux/platform_device.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/dmapool.h>
  15#include <linux/crypto.h>
  16#include <linux/kernel.h>
  17#include <linux/rtnetlink.h>
  18#include <linux/interrupt.h>
  19#include <linux/spinlock.h>
  20
  21#include <crypto/ctr.h>
  22#include <crypto/des.h>
  23#include <crypto/aes.h>
  24#include <crypto/sha.h>
  25#include <crypto/algapi.h>
  26#include <crypto/aead.h>
  27#include <crypto/authenc.h>
  28#include <crypto/scatterwalk.h>
  29
  30#include <mach/npe.h>
  31#include <mach/qmgr.h>
  32
  33#define MAX_KEYLEN 32
  34
  35/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
  36#define NPE_CTX_LEN 80
  37#define AES_BLOCK128 16
  38
  39#define NPE_OP_HASH_VERIFY   0x01
  40#define NPE_OP_CCM_ENABLE    0x04
  41#define NPE_OP_CRYPT_ENABLE  0x08
  42#define NPE_OP_HASH_ENABLE   0x10
  43#define NPE_OP_NOT_IN_PLACE  0x20
  44#define NPE_OP_HMAC_DISABLE  0x40
  45#define NPE_OP_CRYPT_ENCRYPT 0x80
  46
  47#define NPE_OP_CCM_GEN_MIC   0xcc
  48#define NPE_OP_HASH_GEN_ICV  0x50
  49#define NPE_OP_ENC_GEN_KEY   0xc9
  50
  51#define MOD_ECB     0x0000
  52#define MOD_CTR     0x1000
  53#define MOD_CBC_ENC 0x2000
  54#define MOD_CBC_DEC 0x3000
  55#define MOD_CCM_ENC 0x4000
  56#define MOD_CCM_DEC 0x5000
  57
  58#define KEYLEN_128  4
  59#define KEYLEN_192  6
  60#define KEYLEN_256  8
  61
  62#define CIPH_DECR   0x0000
  63#define CIPH_ENCR   0x0400
  64
  65#define MOD_DES     0x0000
  66#define MOD_TDEA2   0x0100
   67#define MOD_3DES    0x0200
  68#define MOD_AES     0x0800
  69#define MOD_AES128  (0x0800 | KEYLEN_128)
  70#define MOD_AES192  (0x0900 | KEYLEN_192)
  71#define MOD_AES256  (0x0a00 | KEYLEN_256)
  72
  73#define MAX_IVLEN   16
  74#define NPE_ID      2  /* NPE C */
  75#define NPE_QLEN    16
   76/* Extra descriptors for key-setup (registration) requests issued
   77 * while the first NPE_QLEN crypt_ctl are busy */
  78#define NPE_QLEN_TOTAL 64
  79
  80#define SEND_QID    29
  81#define RECV_QID    30
  82
  83#define CTL_FLAG_UNUSED         0x0000
  84#define CTL_FLAG_USED           0x1000
  85#define CTL_FLAG_PERFORM_ABLK   0x0001
  86#define CTL_FLAG_GEN_ICV        0x0002
  87#define CTL_FLAG_GEN_REVAES     0x0004
  88#define CTL_FLAG_PERFORM_AEAD   0x0008
  89#define CTL_FLAG_MASK           0x000f
  90
  91#define HMAC_IPAD_VALUE   0x36
  92#define HMAC_OPAD_VALUE   0x5C
  93#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
  94
  95#define MD5_DIGEST_SIZE   16
  96
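     /* Chained DMA buffer descriptor. phys_next/phys_addr are the DMA
      * addresses the NPE follows through the chain; 'next' and 'dir' are
      * host-side bookkeeping only, used by free_buf_chain() to unmap and
      * free the chain again. */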
  97struct buffer_desc {
  98        u32 phys_next;
  99        u16 buf_len;
 100        u16 pkt_len;
 101        u32 phys_addr;
 102        u32 __reserved[4];
 103        struct buffer_desc *next;
 104        enum dma_data_direction dir;
 105};
 106
 107struct crypt_ctl {
 108        u8 mode;                /* NPE_OP_*  operation mode */
 109        u8 init_len;
 110        u16 reserved;
 111        u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
 112        u32 icv_rev_aes;        /* icv or rev aes */
 113        u32 src_buf;
 114        u32 dst_buf;
 115        u16 auth_offs;          /* Authentication start offset */
 116        u16 auth_len;           /* Authentication data length */
 117        u16 crypt_offs;         /* Cryption start offset */
 118        u16 crypt_len;          /* Cryption data length */
 119        u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
 120        u32 crypto_ctx;         /* NPE Crypto Param structure address */
 121
 122        /* Used by Host: 4*4 bytes*/
 123        unsigned ctl_flags;
 124        union {
 125                struct ablkcipher_request *ablk_req;
 126                struct aead_request *aead_req;
 127                struct crypto_tfm *tfm;
 128        } data;
 129        struct buffer_desc *regist_buf;
 130        u8 *regist_ptr;
 131};
 132
 133struct ablk_ctx {
 134        struct buffer_desc *src;
 135        struct buffer_desc *dst;
 136};
 137
 138struct aead_ctx {
 139        struct buffer_desc *buffer;
 140        struct scatterlist ivlist;
 141        /* used when the hmac is not on one sg entry */
 142        u8 *hmac_virt;
 143        int encrypt;
 144};
 145
 146struct ix_hash_algo {
 147        u32 cfgword;
 148        unsigned char *icv;
 149};
 150
 151struct ix_sa_dir {
 152        unsigned char *npe_ctx;
 153        dma_addr_t npe_ctx_phys;
 154        int npe_ctx_idx;
 155        u8 npe_mode;
 156};
 157
 158struct ixp_ctx {
 159        struct ix_sa_dir encrypt;
 160        struct ix_sa_dir decrypt;
 161        int authkey_len;
 162        u8 authkey[MAX_KEYLEN];
 163        int enckey_len;
 164        u8 enckey[MAX_KEYLEN];
 165        u8 salt[MAX_IVLEN];
 166        u8 nonce[CTR_RFC3686_NONCE_SIZE];
 167        unsigned salted;
 168        atomic_t configuring;
 169        struct completion completion;
 170};
 171
 172struct ixp_alg {
 173        struct crypto_alg crypto;
 174        const struct ix_hash_algo *hash;
 175        u32 cfg_enc;
 176        u32 cfg_dec;
 177
 178        int registered;
 179};
 180
 181static const struct ix_hash_algo hash_alg_md5 = {
 182        .cfgword        = 0xAA010004,
 183        .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
 184                          "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
 185};
 186static const struct ix_hash_algo hash_alg_sha1 = {
 187        .cfgword        = 0x00000005,
 188        .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
 189                          "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
 190};
 191
 192static struct npe *npe_c;
 193static struct dma_pool *buffer_pool = NULL;
 194static struct dma_pool *ctx_pool = NULL;
 195
 196static struct crypt_ctl *crypt_virt = NULL;
 197static dma_addr_t crypt_phys;
 198
 199static int support_aes = 1;
 200
 201static void dev_release(struct device *dev)
 202{
 203        return;
 204}
 205
 206#define DRIVER_NAME "ixp4xx_crypto"
 207static struct platform_device pseudo_dev = {
 208        .name = DRIVER_NAME,
 209        .id   = 0,
 210        .num_resources = 0,
 211        .dev  = {
 212                .coherent_dma_mask = DMA_BIT_MASK(32),
 213                .release = dev_release,
 214        }
 215};
 216
 217static struct device *dev = &pseudo_dev.dev;
 218
 219static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
 220{
 221        return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
 222}
 223
 224static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
 225{
 226        return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
 227}
 228
 229static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
 230{
 231        return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
 232}
 233
 234static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
 235{
 236        return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
 237}
 238
 239static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
 240{
 241        return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
 242}
 243
 244static int setup_crypt_desc(void)
 245{
 246        BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
 247        crypt_virt = dma_alloc_coherent(dev,
  248                        NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
  249                        &crypt_phys, GFP_ATOMIC); /* may run under desc_lock */
 250        if (!crypt_virt)
 251                return -ENOMEM;
  252        memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
 253        return 0;
 254}
 255
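     /* crypt_ctl descriptors live in one coherent array of NPE_QLEN_TOTAL
      * entries: get_crypt_desc() hands out the first NPE_QLEN entries for
      * normal requests, the rest is reserved for the emergency allocator
      * below. */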
 256static spinlock_t desc_lock;
 257static struct crypt_ctl *get_crypt_desc(void)
 258{
 259        int i;
 260        static int idx = 0;
 261        unsigned long flags;
 262
 263        spin_lock_irqsave(&desc_lock, flags);
 264
 265        if (unlikely(!crypt_virt))
 266                setup_crypt_desc();
 267        if (unlikely(!crypt_virt)) {
 268                spin_unlock_irqrestore(&desc_lock, flags);
 269                return NULL;
 270        }
 271        i = idx;
 272        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
 273                if (++idx >= NPE_QLEN)
 274                        idx = 0;
 275                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
 276                spin_unlock_irqrestore(&desc_lock, flags);
 277                return crypt_virt +i;
 278        } else {
 279                spin_unlock_irqrestore(&desc_lock, flags);
 280                return NULL;
 281        }
 282}
 283
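     /* Emergency descriptors serve the key-setup operations
      * (register_chain_var(), gen_rev_aes_key()), so a setkey can still be
      * queued while all regular descriptors are held by in-flight requests. */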
 284static spinlock_t emerg_lock;
 285static struct crypt_ctl *get_crypt_desc_emerg(void)
 286{
 287        int i;
 288        static int idx = NPE_QLEN;
 289        struct crypt_ctl *desc;
 290        unsigned long flags;
 291
 292        desc = get_crypt_desc();
 293        if (desc)
 294                return desc;
 295        if (unlikely(!crypt_virt))
 296                return NULL;
 297
 298        spin_lock_irqsave(&emerg_lock, flags);
 299        i = idx;
 300        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
 301                if (++idx >= NPE_QLEN_TOTAL)
 302                        idx = NPE_QLEN;
 303                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
 304                spin_unlock_irqrestore(&emerg_lock, flags);
 305                return crypt_virt +i;
 306        } else {
 307                spin_unlock_irqrestore(&emerg_lock, flags);
 308                return NULL;
 309        }
 310}
 311
 312static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
 313{
 314        while (buf) {
 315                struct buffer_desc *buf1;
 316                u32 phys1;
 317
 318                buf1 = buf->next;
 319                phys1 = buf->phys_next;
  320                dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
 321                dma_pool_free(buffer_pool, buf, phys);
 322                buf = buf1;
 323                phys = phys1;
 324        }
 325}
 326
 327static struct tasklet_struct crypto_done_tasklet;
 328
 329static void finish_scattered_hmac(struct crypt_ctl *crypt)
 330{
 331        struct aead_request *req = crypt->data.aead_req;
 332        struct aead_ctx *req_ctx = aead_request_ctx(req);
 333        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 334        int authsize = crypto_aead_authsize(tfm);
 335        int decryptlen = req->cryptlen - authsize;
 336
 337        if (req_ctx->encrypt) {
 338                scatterwalk_map_and_copy(req_ctx->hmac_virt,
 339                        req->src, decryptlen, authsize, 1);
 340        }
 341        dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 342}
 343
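     /* Handle one descriptor returned by the NPE on RECV_QID. The queue
      * entry is the physical address of the crypt_ctl; bit 0 flags an
      * authentication (ICV) failure and the low bits are masked off before
      * translating back to a virtual pointer. */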
 344static void one_packet(dma_addr_t phys)
 345{
 346        struct crypt_ctl *crypt;
 347        struct ixp_ctx *ctx;
 348        int failed;
 349
 350        failed = phys & 0x1 ? -EBADMSG : 0;
 351        phys &= ~0x3;
 352        crypt = crypt_phys2virt(phys);
 353
 354        switch (crypt->ctl_flags & CTL_FLAG_MASK) {
 355        case CTL_FLAG_PERFORM_AEAD: {
 356                struct aead_request *req = crypt->data.aead_req;
 357                struct aead_ctx *req_ctx = aead_request_ctx(req);
 358
 359                free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
 360                if (req_ctx->hmac_virt) {
 361                        finish_scattered_hmac(crypt);
 362                }
 363                req->base.complete(&req->base, failed);
 364                break;
 365        }
 366        case CTL_FLAG_PERFORM_ABLK: {
 367                struct ablkcipher_request *req = crypt->data.ablk_req;
 368                struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
 369
 370                if (req_ctx->dst) {
 371                        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 372                }
 373                free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 374                req->base.complete(&req->base, failed);
 375                break;
 376        }
 377        case CTL_FLAG_GEN_ICV:
 378                ctx = crypto_tfm_ctx(crypt->data.tfm);
 379                dma_pool_free(ctx_pool, crypt->regist_ptr,
 380                                crypt->regist_buf->phys_addr);
 381                dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
 382                if (atomic_dec_and_test(&ctx->configuring))
 383                        complete(&ctx->completion);
 384                break;
 385        case CTL_FLAG_GEN_REVAES:
 386                ctx = crypto_tfm_ctx(crypt->data.tfm);
 387                *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
 388                if (atomic_dec_and_test(&ctx->configuring))
 389                        complete(&ctx->completion);
 390                break;
 391        default:
 392                BUG();
 393        }
 394        crypt->ctl_flags = CTL_FLAG_UNUSED;
 395}
 396
 397static void irqhandler(void *_unused)
 398{
 399        tasklet_schedule(&crypto_done_tasklet);
 400}
 401
 402static void crypto_done_action(unsigned long arg)
 403{
 404        int i;
 405
 406        for(i=0; i<4; i++) {
 407                dma_addr_t phys = qmgr_get_entry(RECV_QID);
 408                if (!phys)
 409                        return;
 410                one_packet(phys);
 411        }
 412        tasklet_schedule(&crypto_done_tasklet);
 413}
 414
 415static int init_ixp_crypto(void)
 416{
 417        int ret = -ENODEV;
 418        u32 msg[2] = { 0, 0 };
 419
 420        if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
 421                                IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
 422                printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
 423                return ret;
 424        }
 425        npe_c = npe_request(NPE_ID);
 426        if (!npe_c)
 427                return ret;
 428
 429        if (!npe_running(npe_c)) {
 430                ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
 431                if (ret) {
  432                        goto err; /* release the requested NPE */
 433                }
 434                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
 435                        goto npe_error;
 436        } else {
 437                if (npe_send_message(npe_c, msg, "STATUS_MSG"))
 438                        goto npe_error;
 439
 440                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
 441                        goto npe_error;
 442        }
 443
 444        switch ((msg[1]>>16) & 0xff) {
 445        case 3:
 446                printk(KERN_WARNING "Firmware of %s lacks AES support\n",
 447                                npe_name(npe_c));
 448                support_aes = 0;
 449                break;
 450        case 4:
 451        case 5:
 452                support_aes = 1;
 453                break;
 454        default:
 455                printk(KERN_ERR "Firmware of %s lacks crypto support\n",
 456                        npe_name(npe_c));
  457                ret = -ENODEV;
                     goto err; /* release the requested NPE */
 458        }
  459        /* buffer_pool is also sometimes used to store the hmac,
  460         * so make sure it is large enough
 461         */
 462        BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
 463        buffer_pool = dma_pool_create("buffer", dev,
 464                        sizeof(struct buffer_desc), 32, 0);
 465        ret = -ENOMEM;
 466        if (!buffer_pool) {
 467                goto err;
 468        }
 469        ctx_pool = dma_pool_create("context", dev,
 470                        NPE_CTX_LEN, 16, 0);
 471        if (!ctx_pool) {
 472                goto err;
 473        }
 474        ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
 475                                 "ixp_crypto:out", NULL);
 476        if (ret)
 477                goto err;
 478        ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
 479                                 "ixp_crypto:in", NULL);
 480        if (ret) {
 481                qmgr_release_queue(SEND_QID);
 482                goto err;
 483        }
 484        qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
 485        tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
 486
 487        qmgr_enable_irq(RECV_QID);
 488        return 0;
 489
 490npe_error:
 491        printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
 492        ret = -EIO;
 493err:
 494        if (ctx_pool)
 495                dma_pool_destroy(ctx_pool);
 496        if (buffer_pool)
 497                dma_pool_destroy(buffer_pool);
 498        npe_release(npe_c);
 499        return ret;
 500}
 501
 502static void release_ixp_crypto(void)
 503{
 504        qmgr_disable_irq(RECV_QID);
 505        tasklet_kill(&crypto_done_tasklet);
 506
 507        qmgr_release_queue(SEND_QID);
 508        qmgr_release_queue(RECV_QID);
 509
 510        dma_pool_destroy(ctx_pool);
 511        dma_pool_destroy(buffer_pool);
 512
 513        npe_release(npe_c);
 514
 515        if (crypt_virt) {
 516                dma_free_coherent(dev,
 517                        NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
 518                        crypt_virt, crypt_phys);
 519        }
 520        return;
 521}
 522
 523static void reset_sa_dir(struct ix_sa_dir *dir)
 524{
 525        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
 526        dir->npe_ctx_idx = 0;
 527        dir->npe_mode = 0;
 528}
 529
 530static int init_sa_dir(struct ix_sa_dir *dir)
 531{
 532        dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
 533        if (!dir->npe_ctx) {
 534                return -ENOMEM;
 535        }
 536        reset_sa_dir(dir);
 537        return 0;
 538}
 539
 540static void free_sa_dir(struct ix_sa_dir *dir)
 541{
 542        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
 543        dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
 544}
 545
 546static int init_tfm(struct crypto_tfm *tfm)
 547{
 548        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 549        int ret;
 550
 551        atomic_set(&ctx->configuring, 0);
 552        ret = init_sa_dir(&ctx->encrypt);
 553        if (ret)
 554                return ret;
 555        ret = init_sa_dir(&ctx->decrypt);
 556        if (ret) {
 557                free_sa_dir(&ctx->encrypt);
 558        }
 559        return ret;
 560}
 561
 562static int init_tfm_ablk(struct crypto_tfm *tfm)
 563{
 564        tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
 565        return init_tfm(tfm);
 566}
 567
 568static int init_tfm_aead(struct crypto_tfm *tfm)
 569{
 570        tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
 571        return init_tfm(tfm);
 572}
 573
 574static void exit_tfm(struct crypto_tfm *tfm)
 575{
 576        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 577        free_sa_dir(&ctx->encrypt);
 578        free_sa_dir(&ctx->decrypt);
 579}
 580
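     /* Queue a one-off hash operation over (key XOR xpad) so the NPE writes
      * the resulting intermediate HMAC state to 'target' inside the crypto
      * context; completion is reported via CTL_FLAG_GEN_ICV in one_packet(). */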
 581static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
 582                int init_len, u32 ctx_addr, const u8 *key, int key_len)
 583{
 584        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 585        struct crypt_ctl *crypt;
 586        struct buffer_desc *buf;
 587        int i;
 588        u8 *pad;
 589        u32 pad_phys, buf_phys;
 590
 591        BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
 592        pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
 593        if (!pad)
 594                return -ENOMEM;
 595        buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
 596        if (!buf) {
 597                dma_pool_free(ctx_pool, pad, pad_phys);
 598                return -ENOMEM;
 599        }
 600        crypt = get_crypt_desc_emerg();
 601        if (!crypt) {
 602                dma_pool_free(ctx_pool, pad, pad_phys);
 603                dma_pool_free(buffer_pool, buf, buf_phys);
 604                return -EAGAIN;
 605        }
 606
 607        memcpy(pad, key, key_len);
 608        memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
 609        for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
 610                pad[i] ^= xpad;
 611        }
 612
 613        crypt->data.tfm = tfm;
 614        crypt->regist_ptr = pad;
 615        crypt->regist_buf = buf;
 616
 617        crypt->auth_offs = 0;
 618        crypt->auth_len = HMAC_PAD_BLOCKLEN;
 619        crypt->crypto_ctx = ctx_addr;
 620        crypt->src_buf = buf_phys;
 621        crypt->icv_rev_aes = target;
 622        crypt->mode = NPE_OP_HASH_GEN_ICV;
 623        crypt->init_len = init_len;
 624        crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
 625
 626        buf->next = 0;
 627        buf->buf_len = HMAC_PAD_BLOCKLEN;
 628        buf->pkt_len = 0;
 629        buf->phys_addr = pad_phys;
 630
 631        atomic_inc(&ctx->configuring);
 632        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 633        BUG_ON(qmgr_stat_overflow(SEND_QID));
 634        return 0;
 635}
 636
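     /* Write the hash config word and initial chaining values into the NPE
      * context for one direction, then precompute the HMAC inner and outer
      * pad states with two register_chain_var() requests. */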
 637static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
 638                const u8 *key, int key_len, unsigned digest_len)
 639{
 640        u32 itarget, otarget, npe_ctx_addr;
 641        unsigned char *cinfo;
 642        int init_len, ret = 0;
 643        u32 cfgword;
 644        struct ix_sa_dir *dir;
 645        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 646        const struct ix_hash_algo *algo;
 647
 648        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 649        cinfo = dir->npe_ctx + dir->npe_ctx_idx;
 650        algo = ix_hash(tfm);
 651
 652        /* write cfg word to cryptinfo */
 653        cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
 654        *(u32*)cinfo = cpu_to_be32(cfgword);
 655        cinfo += sizeof(cfgword);
 656
 657        /* write ICV to cryptinfo */
 658        memcpy(cinfo, algo->icv, digest_len);
 659        cinfo += digest_len;
 660
 661        itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
 662                                + sizeof(algo->cfgword);
 663        otarget = itarget + digest_len;
 664        init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
 665        npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
 666
 667        dir->npe_ctx_idx += init_len;
 668        dir->npe_mode |= NPE_OP_HASH_ENABLE;
 669
 670        if (!encrypt)
 671                dir->npe_mode |= NPE_OP_HASH_VERIFY;
 672
 673        ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
 674                        init_len, npe_ctx_addr, key, key_len);
 675        if (ret)
 676                return ret;
 677        return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
 678                        init_len, npe_ctx_addr, key, key_len);
 679}
 680
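     /* AES decryption needs the reverse (decryption) key schedule. Ask the
      * NPE to generate it with an NPE_OP_ENC_GEN_KEY operation; one_packet()
      * clears CIPH_ENCR again when the result arrives (CTL_FLAG_GEN_REVAES). */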
 681static int gen_rev_aes_key(struct crypto_tfm *tfm)
 682{
 683        struct crypt_ctl *crypt;
 684        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 685        struct ix_sa_dir *dir = &ctx->decrypt;
 686
 687        crypt = get_crypt_desc_emerg();
 688        if (!crypt) {
 689                return -EAGAIN;
 690        }
 691        *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
 692
 693        crypt->data.tfm = tfm;
 694        crypt->crypt_offs = 0;
 695        crypt->crypt_len = AES_BLOCK128;
 696        crypt->src_buf = 0;
 697        crypt->crypto_ctx = dir->npe_ctx_phys;
 698        crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
 699        crypt->mode = NPE_OP_ENC_GEN_KEY;
 700        crypt->init_len = dir->npe_ctx_idx;
 701        crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
 702
 703        atomic_inc(&ctx->configuring);
 704        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 705        BUG_ON(qmgr_stat_overflow(SEND_QID));
 706        return 0;
 707}
 708
 709static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
 710                const u8 *key, int key_len)
 711{
 712        u8 *cinfo;
 713        u32 cipher_cfg;
 714        u32 keylen_cfg = 0;
 715        struct ix_sa_dir *dir;
 716        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 717        u32 *flags = &tfm->crt_flags;
 718
 719        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 720        cinfo = dir->npe_ctx;
 721
 722        if (encrypt) {
 723                cipher_cfg = cipher_cfg_enc(tfm);
 724                dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
 725        } else {
 726                cipher_cfg = cipher_cfg_dec(tfm);
 727        }
 728        if (cipher_cfg & MOD_AES) {
 729                switch (key_len) {
 730                        case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
 731                        case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
 732                        case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
 733                        default:
 734                                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 735                                return -EINVAL;
 736                }
 737                cipher_cfg |= keylen_cfg;
 738        } else if (cipher_cfg & MOD_3DES) {
 739                const u32 *K = (const u32 *)key;
 740                if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 741                             !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
 742                {
 743                        *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
 744                        return -EINVAL;
 745                }
 746        } else {
 747                u32 tmp[DES_EXPKEY_WORDS];
 748                if (des_ekey(tmp, key) == 0) {
 749                        *flags |= CRYPTO_TFM_RES_WEAK_KEY;
 750                }
 751        }
 752        /* write cfg word to cryptinfo */
 753        *(u32*)cinfo = cpu_to_be32(cipher_cfg);
 754        cinfo += sizeof(cipher_cfg);
 755
 756        /* write cipher key to cryptinfo */
 757        memcpy(cinfo, key, key_len);
 758        /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
 759        if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
 760                memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
 761                key_len = DES3_EDE_KEY_SIZE;
 762        }
 763        dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
 764        dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
 765        if ((cipher_cfg & MOD_AES) && !encrypt) {
 766                return gen_rev_aes_key(tfm);
 767        }
 768        return 0;
 769}
 770
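     /* Walk a scatterlist and build a chain of buffer_desc entries from
      * buffer_pool, DMA-mapping each sg entry. 'buf' is the caller's
      * on-stack hook whose next/phys_next end up pointing at the real head
      * of the chain. Returns the last descriptor, or NULL on allocation
      * failure. */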
 771static struct buffer_desc *chainup_buffers(struct device *dev,
 772                struct scatterlist *sg, unsigned nbytes,
 773                struct buffer_desc *buf, gfp_t flags,
 774                enum dma_data_direction dir)
 775{
 776        for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
 777                unsigned len = min(nbytes, sg->length);
 778                struct buffer_desc *next_buf;
 779                u32 next_buf_phys;
 780                void *ptr;
 781
 782                nbytes -= len;
 783                ptr = page_address(sg_page(sg)) + sg->offset;
 784                next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
 785                if (!next_buf) {
 786                        buf = NULL;
 787                        break;
 788                }
 789                sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
 790                buf->next = next_buf;
 791                buf->phys_next = next_buf_phys;
 792                buf = next_buf;
 793
 794                buf->phys_addr = sg_dma_address(sg);
 795                buf->buf_len = len;
 796                buf->dir = dir;
 797        }
             if (buf) { /* NULL if a pool allocation failed above */
  798                buf->next = NULL;
  799                buf->phys_next = 0;
             }
  800        return buf;
 801}
 802
 803static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 804                        unsigned int key_len)
 805{
 806        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 807        u32 *flags = &tfm->base.crt_flags;
 808        int ret;
 809
 810        init_completion(&ctx->completion);
 811        atomic_inc(&ctx->configuring);
 812
 813        reset_sa_dir(&ctx->encrypt);
 814        reset_sa_dir(&ctx->decrypt);
 815
 816        ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
 817        ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
 818
 819        ret = setup_cipher(&tfm->base, 0, key, key_len);
 820        if (ret)
 821                goto out;
 822        ret = setup_cipher(&tfm->base, 1, key, key_len);
 823        if (ret)
 824                goto out;
 825
 826        if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
 827                if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
 828                        ret = -EINVAL;
 829                } else {
 830                        *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
 831                }
 832        }
 833out:
 834        if (!atomic_dec_and_test(&ctx->configuring))
 835                wait_for_completion(&ctx->completion);
 836        return ret;
 837}
 838
 839static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 840                unsigned int key_len)
 841{
 842        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 843
  844        /* the nonce occupies the last CTR_RFC3686_NONCE_SIZE bytes of the key */
 845        if (key_len < CTR_RFC3686_NONCE_SIZE)
 846                return -EINVAL;
 847
 848        memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
 849                        CTR_RFC3686_NONCE_SIZE);
 850
 851        key_len -= CTR_RFC3686_NONCE_SIZE;
 852        return ablk_setkey(tfm, key, key_len);
 853}
 854
 855static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 856{
 857        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 858        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 859        unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
 860        struct ix_sa_dir *dir;
 861        struct crypt_ctl *crypt;
 862        unsigned int nbytes = req->nbytes;
 863        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 864        struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
 865        struct buffer_desc src_hook;
 866        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 867                                GFP_KERNEL : GFP_ATOMIC;
 868
 869        if (qmgr_stat_full(SEND_QID))
 870                return -EAGAIN;
 871        if (atomic_read(&ctx->configuring))
 872                return -EAGAIN;
 873
 874        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 875
 876        crypt = get_crypt_desc();
 877        if (!crypt)
 878                return -ENOMEM;
 879
 880        crypt->data.ablk_req = req;
 881        crypt->crypto_ctx = dir->npe_ctx_phys;
 882        crypt->mode = dir->npe_mode;
 883        crypt->init_len = dir->npe_ctx_idx;
 884
 885        crypt->crypt_offs = 0;
 886        crypt->crypt_len = nbytes;
 887
 888        BUG_ON(ivsize && !req->info);
 889        memcpy(crypt->iv, req->info, ivsize);
 890        if (req->src != req->dst) {
 891                struct buffer_desc dst_hook;
 892                crypt->mode |= NPE_OP_NOT_IN_PLACE;
 893                /* This was never tested by Intel
 894                 * for more than one dst buffer, I think. */
 895                BUG_ON(req->dst->length < nbytes);
 896                req_ctx->dst = NULL;
 897                if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
 898                                        flags, DMA_FROM_DEVICE))
 899                        goto free_buf_dest;
 900                src_direction = DMA_TO_DEVICE;
 901                req_ctx->dst = dst_hook.next;
 902                crypt->dst_buf = dst_hook.phys_next;
 903        } else {
 904                req_ctx->dst = NULL;
 905        }
 906        req_ctx->src = NULL;
 907        if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
 908                                flags, src_direction))
 909                goto free_buf_src;
 910
 911        req_ctx->src = src_hook.next;
 912        crypt->src_buf = src_hook.phys_next;
 913        crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
 914        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 915        BUG_ON(qmgr_stat_overflow(SEND_QID));
 916        return -EINPROGRESS;
 917
 918free_buf_src:
 919        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 920free_buf_dest:
 921        if (req->src != req->dst) {
 922                free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 923        }
 924        crypt->ctl_flags = CTL_FLAG_UNUSED;
 925        return -ENOMEM;
 926}
 927
 928static int ablk_encrypt(struct ablkcipher_request *req)
 929{
 930        return ablk_perform(req, 1);
 931}
 932
 933static int ablk_decrypt(struct ablkcipher_request *req)
 934{
 935        return ablk_perform(req, 0);
 936}
 937
 938static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
 939{
 940        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 941        struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 942        u8 iv[CTR_RFC3686_BLOCK_SIZE];
 943        u8 *info = req->info;
 944        int ret;
 945
 946        /* set up counter block */
 947        memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
 948        memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
 949
 950        /* initialize counter portion of counter block */
 951        *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
 952                cpu_to_be32(1);
 953
 954        req->info = iv;
 955        ret = ablk_perform(req, 1);
 956        req->info = info;
 957        return ret;
 958}
 959
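     /* Return true if the byte range [start, start + nbytes) does not fit
      * within a single scatterlist entry, i.e. the ICV would be scattered
      * and has to be copied through a linear bounce buffer. */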
 960static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
 961                unsigned int nbytes)
 962{
 963        int offset = 0;
 964
 965        if (!nbytes)
 966                return 0;
 967
 968        for (;;) {
 969                if (start < offset + sg->length)
 970                        break;
 971
 972                offset += sg->length;
 973                sg = scatterwalk_sg_next(sg);
 974        }
 975        return (start + nbytes > offset + sg->length);
 976}
 977
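     /* Common AEAD path: the NPE authenticates assoc data + IV + payload
      * (auth_offs/auth_len) and en/decrypts the region given by
      * cryptoffset/eff_cryptlen; for decryption the trailing authsize bytes
      * are excluded from the crypt length. Only in-place requests
      * (req->src == req->dst) are supported. */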
 978static int aead_perform(struct aead_request *req, int encrypt,
 979                int cryptoffset, int eff_cryptlen, u8 *iv)
 980{
 981        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 982        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
 983        unsigned ivsize = crypto_aead_ivsize(tfm);
 984        unsigned authsize = crypto_aead_authsize(tfm);
 985        struct ix_sa_dir *dir;
 986        struct crypt_ctl *crypt;
 987        unsigned int cryptlen;
 988        struct buffer_desc *buf, src_hook;
 989        struct aead_ctx *req_ctx = aead_request_ctx(req);
 990        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 991                                GFP_KERNEL : GFP_ATOMIC;
 992
 993        if (qmgr_stat_full(SEND_QID))
 994                return -EAGAIN;
 995        if (atomic_read(&ctx->configuring))
 996                return -EAGAIN;
 997
 998        if (encrypt) {
 999                dir = &ctx->encrypt;
1000                cryptlen = req->cryptlen;
1001        } else {
1002                dir = &ctx->decrypt;
1003                /* req->cryptlen includes the authsize when decrypting */
1004                cryptlen = req->cryptlen -authsize;
1005                eff_cryptlen -= authsize;
1006        }
1007        crypt = get_crypt_desc();
1008        if (!crypt)
1009                return -ENOMEM;
1010
1011        crypt->data.aead_req = req;
1012        crypt->crypto_ctx = dir->npe_ctx_phys;
1013        crypt->mode = dir->npe_mode;
1014        crypt->init_len = dir->npe_ctx_idx;
1015
1016        crypt->crypt_offs = cryptoffset;
1017        crypt->crypt_len = eff_cryptlen;
1018
1019        crypt->auth_offs = 0;
1020        crypt->auth_len = req->assoclen + ivsize + cryptlen;
1021        BUG_ON(ivsize && !req->iv);
1022        memcpy(crypt->iv, req->iv, ivsize);
1023
1024        if (req->src != req->dst) {
 1025                BUG(); /* -ENOTSUP because of my laziness */
1026        }
1027
1028        /* ASSOC data */
1029        buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
1030                flags, DMA_TO_DEVICE);
1031        req_ctx->buffer = src_hook.next;
1032        crypt->src_buf = src_hook.phys_next;
1033        if (!buf)
1034                goto out;
1035        /* IV */
1036        sg_init_table(&req_ctx->ivlist, 1);
1037        sg_set_buf(&req_ctx->ivlist, iv, ivsize);
1038        buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
1039                        DMA_BIDIRECTIONAL);
1040        if (!buf)
1041                goto free_chain;
1042        if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
1043                /* The 12 hmac bytes are scattered,
1044                 * we need to copy them into a safe buffer */
1045                req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
1046                                &crypt->icv_rev_aes);
1047                if (unlikely(!req_ctx->hmac_virt))
1048                        goto free_chain;
1049                if (!encrypt) {
1050                        scatterwalk_map_and_copy(req_ctx->hmac_virt,
1051                                req->src, cryptlen, authsize, 0);
1052                }
1053                req_ctx->encrypt = encrypt;
1054        } else {
1055                req_ctx->hmac_virt = NULL;
1056        }
1057        /* Crypt */
1058        buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
1059                        DMA_BIDIRECTIONAL);
1060        if (!buf)
1061                goto free_hmac_virt;
1062        if (!req_ctx->hmac_virt) {
1063                crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
1064        }
1065
1066        crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1067        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
1068        BUG_ON(qmgr_stat_overflow(SEND_QID));
1069        return -EINPROGRESS;
1070free_hmac_virt:
1071        if (req_ctx->hmac_virt) {
1072                dma_pool_free(buffer_pool, req_ctx->hmac_virt,
1073                                crypt->icv_rev_aes);
1074        }
1075free_chain:
1076        free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
1077out:
1078        crypt->ctl_flags = CTL_FLAG_UNUSED;
1079        return -ENOMEM;
1080}
1081
1082static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1083{
1084        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1085        u32 *flags = &tfm->base.crt_flags;
1086        unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
1087        int ret;
1088
1089        if (!ctx->enckey_len && !ctx->authkey_len)
1090                return 0;
1091        init_completion(&ctx->completion);
1092        atomic_inc(&ctx->configuring);
1093
1094        reset_sa_dir(&ctx->encrypt);
1095        reset_sa_dir(&ctx->decrypt);
1096
1097        ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1098        if (ret)
1099                goto out;
1100        ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1101        if (ret)
1102                goto out;
1103        ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1104                        ctx->authkey_len, digest_len);
1105        if (ret)
1106                goto out;
1107        ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
1108                        ctx->authkey_len, digest_len);
1109        if (ret)
1110                goto out;
1111
1112        if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
1113                if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
1114                        ret = -EINVAL;
1115                        goto out;
1116                } else {
1117                        *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
1118                }
1119        }
1120out:
1121        if (!atomic_dec_and_test(&ctx->configuring))
1122                wait_for_completion(&ctx->completion);
1123        return ret;
1124}
1125
1126static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1127{
1128        int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
1129
1130        if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
1131                return -EINVAL;
1132        return aead_setup(tfm, authsize);
1133}
1134
1135static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1136                        unsigned int keylen)
1137{
1138        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1139        struct rtattr *rta = (struct rtattr *)key;
1140        struct crypto_authenc_key_param *param;
1141
1142        if (!RTA_OK(rta, keylen))
1143                goto badkey;
1144        if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1145                goto badkey;
1146        if (RTA_PAYLOAD(rta) < sizeof(*param))
1147                goto badkey;
1148
1149        param = RTA_DATA(rta);
1150        ctx->enckey_len = be32_to_cpu(param->enckeylen);
1151
1152        key += RTA_ALIGN(rta->rta_len);
1153        keylen -= RTA_ALIGN(rta->rta_len);
1154
1155        if (keylen < ctx->enckey_len)
1156                goto badkey;
1157
 1158        ctx->authkey_len = keylen - ctx->enckey_len;
             if (ctx->enckey_len > sizeof(ctx->enckey) ||
                 ctx->authkey_len > sizeof(ctx->authkey))
                     goto badkey; /* keys must fit the fixed-size buffers */
 1159        memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
 1160        memcpy(ctx->authkey, key, ctx->authkey_len);
1161
1162        return aead_setup(tfm, crypto_aead_authsize(tfm));
1163badkey:
1164        ctx->enckey_len = 0;
1165        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1166        return -EINVAL;
1167}
1168
1169static int aead_encrypt(struct aead_request *req)
1170{
1171        unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1172        return aead_perform(req, 1, req->assoclen + ivsize,
1173                        req->cryptlen, req->iv);
1174}
1175
1176static int aead_decrypt(struct aead_request *req)
1177{
1178        unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1179        return aead_perform(req, 0, req->assoclen + ivsize,
1180                        req->cryptlen, req->iv);
1181}
1182
1183static int aead_givencrypt(struct aead_givcrypt_request *req)
1184{
1185        struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
1186        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1187        unsigned len, ivsize = crypto_aead_ivsize(tfm);
1188        __be64 seq;
1189
1190        /* copied from eseqiv.c */
1191        if (!ctx->salted) {
1192                get_random_bytes(ctx->salt, ivsize);
1193                ctx->salted = 1;
1194        }
1195        memcpy(req->areq.iv, ctx->salt, ivsize);
1196        len = ivsize;
1197        if (ivsize > sizeof(u64)) {
1198                memset(req->giv, 0, ivsize - sizeof(u64));
1199                len = sizeof(u64);
1200        }
1201        seq = cpu_to_be64(req->seq);
1202        memcpy(req->giv + ivsize - len, &seq, len);
1203        return aead_perform(&req->areq, 1, req->areq.assoclen,
1204                        req->areq.cryptlen +ivsize, req->giv);
1205}
1206
1207static struct ixp_alg ixp4xx_algos[] = {
1208{
1209        .crypto = {
1210                .cra_name       = "cbc(des)",
1211                .cra_blocksize  = DES_BLOCK_SIZE,
1212                .cra_u          = { .ablkcipher = {
1213                        .min_keysize    = DES_KEY_SIZE,
1214                        .max_keysize    = DES_KEY_SIZE,
1215                        .ivsize         = DES_BLOCK_SIZE,
1216                        .geniv          = "eseqiv",
1217                        }
1218                }
1219        },
1220        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1221        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1222
1223}, {
1224        .crypto = {
1225                .cra_name       = "ecb(des)",
1226                .cra_blocksize  = DES_BLOCK_SIZE,
1227                .cra_u          = { .ablkcipher = {
1228                        .min_keysize    = DES_KEY_SIZE,
1229                        .max_keysize    = DES_KEY_SIZE,
1230                        }
1231                }
1232        },
1233        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1234        .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1235}, {
1236        .crypto = {
1237                .cra_name       = "cbc(des3_ede)",
1238                .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
1239                .cra_u          = { .ablkcipher = {
1240                        .min_keysize    = DES3_EDE_KEY_SIZE,
1241                        .max_keysize    = DES3_EDE_KEY_SIZE,
1242                        .ivsize         = DES3_EDE_BLOCK_SIZE,
1243                        .geniv          = "eseqiv",
1244                        }
1245                }
1246        },
1247        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1248        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1249}, {
1250        .crypto = {
1251                .cra_name       = "ecb(des3_ede)",
1252                .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
1253                .cra_u          = { .ablkcipher = {
1254                        .min_keysize    = DES3_EDE_KEY_SIZE,
1255                        .max_keysize    = DES3_EDE_KEY_SIZE,
1256                        }
1257                }
1258        },
1259        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1260        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1261}, {
1262        .crypto = {
1263                .cra_name       = "cbc(aes)",
1264                .cra_blocksize  = AES_BLOCK_SIZE,
1265                .cra_u          = { .ablkcipher = {
1266                        .min_keysize    = AES_MIN_KEY_SIZE,
1267                        .max_keysize    = AES_MAX_KEY_SIZE,
1268                        .ivsize         = AES_BLOCK_SIZE,
1269                        .geniv          = "eseqiv",
1270                        }
1271                }
1272        },
1273        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1274        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1275}, {
1276        .crypto = {
1277                .cra_name       = "ecb(aes)",
1278                .cra_blocksize  = AES_BLOCK_SIZE,
1279                .cra_u          = { .ablkcipher = {
1280                        .min_keysize    = AES_MIN_KEY_SIZE,
1281                        .max_keysize    = AES_MAX_KEY_SIZE,
1282                        }
1283                }
1284        },
1285        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1286        .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1287}, {
1288        .crypto = {
1289                .cra_name       = "ctr(aes)",
1290                .cra_blocksize  = AES_BLOCK_SIZE,
1291                .cra_u          = { .ablkcipher = {
1292                        .min_keysize    = AES_MIN_KEY_SIZE,
1293                        .max_keysize    = AES_MAX_KEY_SIZE,
1294                        .ivsize         = AES_BLOCK_SIZE,
1295                        .geniv          = "eseqiv",
1296                        }
1297                }
1298        },
1299        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1300        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1301}, {
1302        .crypto = {
1303                .cra_name       = "rfc3686(ctr(aes))",
1304                .cra_blocksize  = AES_BLOCK_SIZE,
1305                .cra_u          = { .ablkcipher = {
1306                        .min_keysize    = AES_MIN_KEY_SIZE,
1307                        .max_keysize    = AES_MAX_KEY_SIZE,
1308                        .ivsize         = AES_BLOCK_SIZE,
1309                        .geniv          = "eseqiv",
1310                        .setkey         = ablk_rfc3686_setkey,
1311                        .encrypt        = ablk_rfc3686_crypt,
1312                        .decrypt        = ablk_rfc3686_crypt }
1313                }
1314        },
1315        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1316        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1317}, {
1318        .crypto = {
1319                .cra_name       = "authenc(hmac(md5),cbc(des))",
1320                .cra_blocksize  = DES_BLOCK_SIZE,
1321                .cra_u          = { .aead = {
1322                        .ivsize         = DES_BLOCK_SIZE,
1323                        .maxauthsize    = MD5_DIGEST_SIZE,
1324                        }
1325                }
1326        },
1327        .hash = &hash_alg_md5,
1328        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1329        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1330}, {
1331        .crypto = {
1332                .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
1333                .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
1334                .cra_u          = { .aead = {
1335                        .ivsize         = DES3_EDE_BLOCK_SIZE,
1336                        .maxauthsize    = MD5_DIGEST_SIZE,
1337                        }
1338                }
1339        },
1340        .hash = &hash_alg_md5,
1341        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1342        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1343}, {
1344        .crypto = {
1345                .cra_name       = "authenc(hmac(sha1),cbc(des))",
1346                .cra_blocksize  = DES_BLOCK_SIZE,
1347                .cra_u          = { .aead = {
1348                        .ivsize         = DES_BLOCK_SIZE,
1349                        .maxauthsize    = SHA1_DIGEST_SIZE,
1350                        }
1351                }
1352        },
1353        .hash = &hash_alg_sha1,
1354        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1355        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1356}, {
1357        .crypto = {
1358                .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
1359                .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
1360                .cra_u          = { .aead = {
1361                        .ivsize         = DES3_EDE_BLOCK_SIZE,
1362                        .maxauthsize    = SHA1_DIGEST_SIZE,
1363                        }
1364                }
1365        },
1366        .hash = &hash_alg_sha1,
1367        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1368        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1369}, {
1370        .crypto = {
1371                .cra_name       = "authenc(hmac(md5),cbc(aes))",
1372                .cra_blocksize  = AES_BLOCK_SIZE,
1373                .cra_u          = { .aead = {
1374                        .ivsize         = AES_BLOCK_SIZE,
1375                        .maxauthsize    = MD5_DIGEST_SIZE,
1376                        }
1377                }
1378        },
1379        .hash = &hash_alg_md5,
1380        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1381        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1382}, {
1383        .crypto = {
1384                .cra_name       = "authenc(hmac(sha1),cbc(aes))",
1385                .cra_blocksize  = AES_BLOCK_SIZE,
1386                .cra_u          = { .aead = {
1387                        .ivsize         = AES_BLOCK_SIZE,
1388                        .maxauthsize    = SHA1_DIGEST_SIZE,
1389                        }
1390                }
1391        },
1392        .hash = &hash_alg_sha1,
1393        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1394        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1395} };
1396
1397#define IXP_POSTFIX "-ixp4xx"
1398static int __init ixp_module_init(void)
1399{
1400        int num = ARRAY_SIZE(ixp4xx_algos);
 1401        int i, err;
1402
1403        if (platform_device_register(&pseudo_dev))
1404                return -ENODEV;
1405
1406        spin_lock_init(&desc_lock);
1407        spin_lock_init(&emerg_lock);
1408
1409        err = init_ixp_crypto();
1410        if (err) {
1411                platform_device_unregister(&pseudo_dev);
1412                return err;
1413        }
1414        for (i=0; i< num; i++) {
1415                struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
1416
1417                if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
1418                        "%s"IXP_POSTFIX, cra->cra_name) >=
1419                        CRYPTO_MAX_ALG_NAME)
1420                {
1421                        continue;
1422                }
1423                if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
1424                        continue;
1425                }
1426                if (!ixp4xx_algos[i].hash) {
1427                        /* block ciphers */
1428                        cra->cra_type = &crypto_ablkcipher_type;
1429                        cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1430                                         CRYPTO_ALG_ASYNC;
1431                        if (!cra->cra_ablkcipher.setkey)
1432                                cra->cra_ablkcipher.setkey = ablk_setkey;
1433                        if (!cra->cra_ablkcipher.encrypt)
1434                                cra->cra_ablkcipher.encrypt = ablk_encrypt;
1435                        if (!cra->cra_ablkcipher.decrypt)
1436                                cra->cra_ablkcipher.decrypt = ablk_decrypt;
1437                        cra->cra_init = init_tfm_ablk;
1438                } else {
1439                        /* authenc */
1440                        cra->cra_type = &crypto_aead_type;
1441                        cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
1442                                         CRYPTO_ALG_ASYNC;
1443                        cra->cra_aead.setkey = aead_setkey;
1444                        cra->cra_aead.setauthsize = aead_setauthsize;
1445                        cra->cra_aead.encrypt = aead_encrypt;
1446                        cra->cra_aead.decrypt = aead_decrypt;
1447                        cra->cra_aead.givencrypt = aead_givencrypt;
1448                        cra->cra_init = init_tfm_aead;
1449                }
1450                cra->cra_ctxsize = sizeof(struct ixp_ctx);
1451                cra->cra_module = THIS_MODULE;
1452                cra->cra_alignmask = 3;
1453                cra->cra_priority = 300;
1454                cra->cra_exit = exit_tfm;
1455                if (crypto_register_alg(cra))
1456                        printk(KERN_ERR "Failed to register '%s'\n",
1457                                cra->cra_name);
1458                else
1459                        ixp4xx_algos[i].registered = 1;
1460        }
1461        return 0;
1462}
1463
1464static void __exit ixp_module_exit(void)
1465{
1466        int num = ARRAY_SIZE(ixp4xx_algos);
1467        int i;
1468
1469        for (i=0; i< num; i++) {
1470                if (ixp4xx_algos[i].registered)
1471                        crypto_unregister_alg(&ixp4xx_algos[i].crypto);
1472        }
1473        release_ixp_crypto();
1474        platform_device_unregister(&pseudo_dev);
1475}
1476
1477module_init(ixp_module_init);
1478module_exit(ixp_module_exit);
1479
1480MODULE_LICENSE("GPL");
1481MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1482MODULE_DESCRIPTION("IXP4xx hardware crypto");
1483
1484