// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/of.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

/* Interim includes, delete these after v5.14-rc1 */
#include <linux/soc/ixp4xx/cpu.h>
#include <mach/ixp4xx-regs.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_QLEN    16
/* Extra descriptors for registration (key setup) requests issued
 * while the first NPE_QLEN crypt_ctl entries are busy */
#define NPE_QLEN_TOTAL 64

#define CTL_FLAG_UNUSED         0x0000
#define CTL_FLAG_USED           0x1000
#define CTL_FLAG_PERFORM_ABLK   0x0001
#define CTL_FLAG_GEN_ICV        0x0002
#define CTL_FLAG_GEN_REVAES     0x0004
#define CTL_FLAG_PERFORM_AEAD   0x0008
#define CTL_FLAG_MASK           0x000f

#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

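/*
 * Scatter-gather element as consumed by the NPE: phys_next chains the
 * elements physically, and the buf_len/pkt_len order differs by
 * endianness because the NPE presumably reads each 32-bit word
 * big-endian. The trailing next/dir fields are host-only bookkeeping
 * so the chain can be walked and unmapped again on completion.
 */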
struct buffer_desc {
        u32 phys_next;
#ifdef __ARMEB__
        u16 buf_len;
        u16 pkt_len;
#else
        u16 pkt_len;
        u16 buf_len;
#endif
        dma_addr_t phys_addr;
        u32 __reserved[4];
        struct buffer_desc *next;
        enum dma_data_direction dir;
};

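/*
 * One operation descriptor, shared with the NPE. Everything up to and
 * including crypto_ctx is what the NPE parses; the fields after the
 * "Used by Host" marker are software-only state. The structure must
 * stay exactly 64 bytes (see the BUILD_BUG_ON in setup_crypt_desc())
 * so that virtual/physical translation is simple array arithmetic.
 */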
struct crypt_ctl {
#ifdef __ARMEB__
        u8 mode;                /* NPE_OP_*  operation mode */
        u8 init_len;
        u16 reserved;
#else
        u16 reserved;
        u8 init_len;
        u8 mode;                /* NPE_OP_*  operation mode */
#endif
        u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
        dma_addr_t icv_rev_aes; /* icv or rev aes */
        dma_addr_t src_buf;
        dma_addr_t dst_buf;
#ifdef __ARMEB__
        u16 auth_offs;          /* Authentication start offset */
        u16 auth_len;           /* Authentication data length */
        u16 crypt_offs;         /* Cryption start offset */
        u16 crypt_len;          /* Cryption data length */
#else
        u16 auth_len;           /* Authentication data length */
        u16 auth_offs;          /* Authentication start offset */
        u16 crypt_len;          /* Cryption data length */
        u16 crypt_offs;         /* Cryption start offset */
#endif
        u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
        u32 crypto_ctx;         /* NPE Crypto Param structure address */

        /* Used by Host: 4*4 bytes */
        unsigned int ctl_flags;
        union {
                struct skcipher_request *ablk_req;
                struct aead_request *aead_req;
                struct crypto_tfm *tfm;
        } data;
        struct buffer_desc *regist_buf;
        u8 *regist_ptr;
};

struct ablk_ctx {
        struct buffer_desc *src;
        struct buffer_desc *dst;
        u8 iv[MAX_IVLEN];
        bool encrypt;
        struct skcipher_request fallback_req;   // keep at the end
};

struct aead_ctx {
        struct buffer_desc *src;
        struct buffer_desc *dst;
        struct scatterlist ivlist;
        /* used when the hmac is not on one sg entry */
        u8 *hmac_virt;
        int encrypt;
};

struct ix_hash_algo {
        u32 cfgword;
        unsigned char *icv;
};

struct ix_sa_dir {
        unsigned char *npe_ctx;
        dma_addr_t npe_ctx_phys;
        int npe_ctx_idx;
        u8 npe_mode;
};

struct ixp_ctx {
        struct ix_sa_dir encrypt;
        struct ix_sa_dir decrypt;
        int authkey_len;
        u8 authkey[MAX_KEYLEN];
        int enckey_len;
        u8 enckey[MAX_KEYLEN];
        u8 salt[MAX_IVLEN];
        u8 nonce[CTR_RFC3686_NONCE_SIZE];
        unsigned int salted;
        atomic_t configuring;
        struct completion completion;
        struct crypto_skcipher *fallback_tfm;
};

struct ixp_alg {
        struct skcipher_alg crypto;
        const struct ix_hash_algo *hash;
        u32 cfg_enc;
        u32 cfg_dec;

        int registered;
};

struct ixp_aead_alg {
        struct aead_alg crypto;
        const struct ix_hash_algo *hash;
        u32 cfg_enc;
        u32 cfg_dec;

        int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
        .cfgword        = 0xAA010004,
        .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
                          "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};

static const struct ix_hash_algo hash_alg_sha1 = {
        .cfgword        = 0x00000005,
        .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
                          "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;

static unsigned int send_qid;
static unsigned int recv_qid;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static struct platform_device *pdev;

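/*
 * The descriptors live in a single coherent DMA array, so translating
 * between virtual and physical addresses is plain index arithmetic:
 * descriptor i sits at crypt_phys + i * sizeof(struct crypt_ctl),
 * i.e. crypt_phys + i * 64.
 */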
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
        return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
        return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}

static int setup_crypt_desc(void)
{
        struct device *dev = &pdev->dev;

        BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
        /* the array must cover the emergency pool as well, hence
         * NPE_QLEN_TOTAL rather than NPE_QLEN */
        crypt_virt = dma_alloc_coherent(dev,
                                        NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
                                        &crypt_phys, GFP_ATOMIC);
        if (!crypt_virt)
                return -ENOMEM;
        return 0;
}

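/*
 * Descriptor allocation. The first NPE_QLEN entries form the normal
 * ring used for data requests; get_crypt_desc_emerg() below falls back
 * to the remaining entries up to NPE_QLEN_TOTAL, reserved for
 * configuration (key setup) operations issued while the ring is full.
 * Both allocators are simple cursor scans under a spinlock, and the
 * backing array is created lazily on first use.
 */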
static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
        int i;
        static int idx;
        unsigned long flags;

        spin_lock_irqsave(&desc_lock, flags);

        if (unlikely(!crypt_virt))
                setup_crypt_desc();
        if (unlikely(!crypt_virt)) {
                spin_unlock_irqrestore(&desc_lock, flags);
                return NULL;
        }
        i = idx;
        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
                if (++idx >= NPE_QLEN)
                        idx = 0;
                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
                spin_unlock_irqrestore(&desc_lock, flags);
                return crypt_virt + i;
        } else {
                spin_unlock_irqrestore(&desc_lock, flags);
                return NULL;
        }
}

static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
        int i;
        static int idx = NPE_QLEN;
        struct crypt_ctl *desc;
        unsigned long flags;

        desc = get_crypt_desc();
        if (desc)
                return desc;
        if (unlikely(!crypt_virt))
                return NULL;

        spin_lock_irqsave(&emerg_lock, flags);
        i = idx;
        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
                if (++idx >= NPE_QLEN_TOTAL)
                        idx = NPE_QLEN;
                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
                spin_unlock_irqrestore(&emerg_lock, flags);
                return crypt_virt + i;
        } else {
                spin_unlock_irqrestore(&emerg_lock, flags);
                return NULL;
        }
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
                           dma_addr_t phys)
{
        while (buf) {
                struct buffer_desc *buf1;
                u32 phys1;

                buf1 = buf->next;
                phys1 = buf->phys_next;
                dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
                dma_pool_free(buffer_pool, buf, phys);
                buf = buf1;
                phys = phys1;
        }
}

static struct tasklet_struct crypto_done_tasklet;

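/*
 * Completion helper for the case where the ICV did not fit into one
 * scatterlist element: the NPE wrote it to a bounce buffer, and on
 * encryption it is copied back to its proper place in req->dst here.
 */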
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
        struct aead_request *req = crypt->data.aead_req;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        int decryptlen = req->assoclen + req->cryptlen - authsize;

        if (req_ctx->encrypt) {
                scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
                                         decryptlen, authsize, 1);
        }
        dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
        struct device *dev = &pdev->dev;
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx;
        int failed;

        failed = phys & 0x1 ? -EBADMSG : 0;
        phys &= ~0x3;
        crypt = crypt_phys2virt(phys);

        switch (crypt->ctl_flags & CTL_FLAG_MASK) {
        case CTL_FLAG_PERFORM_AEAD: {
                struct aead_request *req = crypt->data.aead_req;
                struct aead_ctx *req_ctx = aead_request_ctx(req);

                free_buf_chain(dev, req_ctx->src, crypt->src_buf);
                free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
                if (req_ctx->hmac_virt)
                        finish_scattered_hmac(crypt);

                req->base.complete(&req->base, failed);
                break;
        }
        case CTL_FLAG_PERFORM_ABLK: {
                struct skcipher_request *req = crypt->data.ablk_req;
                struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
                struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
                unsigned int ivsize = crypto_skcipher_ivsize(tfm);
                unsigned int offset;

                if (ivsize > 0) {
                        offset = req->cryptlen - ivsize;
                        if (req_ctx->encrypt) {
                                scatterwalk_map_and_copy(req->iv, req->dst,
                                                         offset, ivsize, 0);
                        } else {
                                memcpy(req->iv, req_ctx->iv, ivsize);
                                memzero_explicit(req_ctx->iv, ivsize);
                        }
                }

                if (req_ctx->dst)
                        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

                free_buf_chain(dev, req_ctx->src, crypt->src_buf);
                req->base.complete(&req->base, failed);
                break;
        }
        case CTL_FLAG_GEN_ICV:
                ctx = crypto_tfm_ctx(crypt->data.tfm);
                dma_pool_free(ctx_pool, crypt->regist_ptr,
                              crypt->regist_buf->phys_addr);
                dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
                if (atomic_dec_and_test(&ctx->configuring))
                        complete(&ctx->completion);
                break;
        case CTL_FLAG_GEN_REVAES:
                ctx = crypto_tfm_ctx(crypt->data.tfm);
                *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
                if (atomic_dec_and_test(&ctx->configuring))
                        complete(&ctx->completion);
                break;
        default:
                BUG();
        }
        crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
        tasklet_schedule(&crypto_done_tasklet);
}

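/*
 * Bottom half: drain up to four completed descriptors from the receive
 * queue per run, then reschedule, which keeps individual tasklet
 * invocations short.
 */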
static void crypto_done_action(unsigned long arg)
{
        int i;

        for (i = 0; i < 4; i++) {
                dma_addr_t phys = qmgr_get_entry(recv_qid);
                if (!phys)
                        return;
                one_packet(phys);
        }
        tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
        struct device_node *np = dev->of_node;
        u32 msg[2] = { 0, 0 };
        int ret = -ENODEV;
        u32 npe_id;

        dev_info(dev, "probing...\n");

        /* Locate the NPE and queue manager to use from device tree */
        if (IS_ENABLED(CONFIG_OF) && np) {
                struct of_phandle_args queue_spec;
                struct of_phandle_args npe_spec;

                ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
                                                       1, 0, &npe_spec);
                if (ret) {
                        dev_err(dev, "no NPE engine specified\n");
                        return -ENODEV;
                }
                npe_id = npe_spec.args[0];

                ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
                                                       &queue_spec);
                if (ret) {
                        dev_err(dev, "no rx queue phandle\n");
                        return -ENODEV;
                }
                recv_qid = queue_spec.args[0];

                ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
                                                       &queue_spec);
                if (ret) {
                        dev_err(dev, "no txready queue phandle\n");
                        return -ENODEV;
                }
                send_qid = queue_spec.args[0];
        } else {
                /*
                 * Hardcoded engine when using platform data, this goes away
                 * when we switch to using DT only.
                 */
                npe_id = 2;
                send_qid = 29;
                recv_qid = 30;
        }

        npe_c = npe_request(npe_id);
        if (!npe_c)
                return ret;

        if (!npe_running(npe_c)) {
                ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
                if (ret)
                        goto npe_release;
                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;
        } else {
                if (npe_send_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;

                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;
        }

        switch ((msg[1] >> 16) & 0xff) {
        case 3:
                dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
                support_aes = 0;
                break;
        case 4:
        case 5:
                support_aes = 1;
                break;
        default:
                dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
                ret = -ENODEV;
                goto npe_release;
        }
        /* buffer_pool is sometimes also used to store the hmac,
         * so make sure it is large enough
         */
        BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
        buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
                                      32, 0);
        ret = -ENOMEM;
        if (!buffer_pool)
                goto err;

        ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
        if (!ctx_pool)
                goto err;

        ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
                                 "ixp_crypto:out", NULL);
        if (ret)
                goto err;
        ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
                                 "ixp_crypto:in", NULL);
        if (ret) {
                qmgr_release_queue(send_qid);
                goto err;
        }
        qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
        tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

        qmgr_enable_irq(recv_qid);
        return 0;

npe_error:
        dev_err(dev, "%s not responding\n", npe_name(npe_c));
        ret = -EIO;
err:
        dma_pool_destroy(ctx_pool);
        dma_pool_destroy(buffer_pool);
npe_release:
        npe_release(npe_c);
        return ret;
}

static void release_ixp_crypto(struct device *dev)
{
        qmgr_disable_irq(recv_qid);
        tasklet_kill(&crypto_done_tasklet);

        qmgr_release_queue(send_qid);
        qmgr_release_queue(recv_qid);

        dma_pool_destroy(ctx_pool);
        dma_pool_destroy(buffer_pool);

        npe_release(npe_c);

        if (crypt_virt)
                dma_free_coherent(dev,
                                  NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
                                  crypt_virt, crypt_phys);
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
        dir->npe_ctx_idx = 0;
        dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
        dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
        if (!dir->npe_ctx)
                return -ENOMEM;

        reset_sa_dir(dir);
        return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
        dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        atomic_set(&ctx->configuring, 0);
        ret = init_sa_dir(&ctx->encrypt);
        if (ret)
                return ret;
        ret = init_sa_dir(&ctx->decrypt);
        if (ret)
                free_sa_dir(&ctx->encrypt);

        return ret;
}

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
        struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
        struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
        const char *name = crypto_tfm_alg_name(ctfm);

        ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback_tfm)) {
                pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
                       name, PTR_ERR(ctx->fallback_tfm));
                return PTR_ERR(ctx->fallback_tfm);
        }

        pr_info("Fallback for %s is %s\n",
                crypto_tfm_alg_driver_name(&tfm->base),
                crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm)));

        crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) +
                                    crypto_skcipher_reqsize(ctx->fallback_tfm));
        return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
        return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

        free_sa_dir(&ctx->encrypt);
        free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
        struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
        struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);

        crypto_free_skcipher(ctx->fallback_tfm);
        exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
        exit_tfm(crypto_aead_tfm(tfm));
}

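/*
 * HMAC precomputation offload: hash one block of key XOR ipad/opad and
 * let the NPE store the intermediate digest at @target inside the SA
 * context. register_chain_var() queues one such operation; setup_auth()
 * calls it twice, once per pad value.
 */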
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
                              int init_len, u32 ctx_addr, const u8 *key,
                              int key_len)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypt_ctl *crypt;
        struct buffer_desc *buf;
        int i;
        u8 *pad;
        dma_addr_t pad_phys, buf_phys;

        BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
        pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
        if (!pad)
                return -ENOMEM;
        buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
        if (!buf) {
                dma_pool_free(ctx_pool, pad, pad_phys);
                return -ENOMEM;
        }
        crypt = get_crypt_desc_emerg();
        if (!crypt) {
                dma_pool_free(ctx_pool, pad, pad_phys);
                dma_pool_free(buffer_pool, buf, buf_phys);
                return -EAGAIN;
        }

        memcpy(pad, key, key_len);
        memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
        for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
                pad[i] ^= xpad;

        crypt->data.tfm = tfm;
        crypt->regist_ptr = pad;
        crypt->regist_buf = buf;

        crypt->auth_offs = 0;
        crypt->auth_len = HMAC_PAD_BLOCKLEN;
        crypt->crypto_ctx = ctx_addr;
        crypt->src_buf = buf_phys;
        crypt->icv_rev_aes = target;
        crypt->mode = NPE_OP_HASH_GEN_ICV;
        crypt->init_len = init_len;
        crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

        buf->next = NULL;
        buf->buf_len = HMAC_PAD_BLOCKLEN;
        buf->pkt_len = 0;
        buf->phys_addr = pad_phys;

        atomic_inc(&ctx->configuring);
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return 0;
}

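/*
 * Build the authentication part of the per-direction NPE context:
 * the config word followed by the algorithm's initial chaining values,
 * with itarget/otarget naming the physical slots that the precomputed
 * inner and outer HMAC digests will overwrite.
 */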
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
                      const u8 *key, int key_len, unsigned int digest_len)
{
        u32 itarget, otarget, npe_ctx_addr;
        unsigned char *cinfo;
        int init_len, ret = 0;
        u32 cfgword;
        struct ix_sa_dir *dir;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        const struct ix_hash_algo *algo;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        cinfo = dir->npe_ctx + dir->npe_ctx_idx;
        algo = ix_hash(tfm);

        /* write cfg word to cryptinfo */
        cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
        cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
        *(u32 *)cinfo = cpu_to_be32(cfgword);
        cinfo += sizeof(cfgword);

        /* write ICV to cryptinfo */
        memcpy(cinfo, algo->icv, digest_len);
        cinfo += digest_len;

        itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
                                + sizeof(algo->cfgword);
        otarget = itarget + digest_len;
        init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
        npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

        dir->npe_ctx_idx += init_len;
        dir->npe_mode |= NPE_OP_HASH_ENABLE;

        if (!encrypt)
                dir->npe_mode |= NPE_OP_HASH_VERIFY;

        ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
                                 init_len, npe_ctx_addr, key, key_len);
        if (ret)
                return ret;
        return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
                                  init_len, npe_ctx_addr, key, key_len);
}

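/*
 * AES decryption needs the reverse (decryption) key schedule. The NPE
 * derives it itself: a dummy NPE_OP_ENC_GEN_KEY operation is queued and
 * the result lands right behind the config word in the decrypt context;
 * the completion path (CTL_FLAG_GEN_REVAES) then clears the temporary
 * CIPH_ENCR bit again.
 */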
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ix_sa_dir *dir = &ctx->decrypt;

        crypt = get_crypt_desc_emerg();
        if (!crypt)
                return -EAGAIN;

        *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

        crypt->data.tfm = tfm;
        crypt->crypt_offs = 0;
        crypt->crypt_len = AES_BLOCK128;
        crypt->src_buf = 0;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
        crypt->mode = NPE_OP_ENC_GEN_KEY;
        crypt->init_len = dir->npe_ctx_idx;
        crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

        atomic_inc(&ctx->configuring);
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return 0;
}

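/*
 * Write the cipher half of the NPE context: the config word (mode, key
 * length, direction) followed by the key itself. The NPE expects a
 * DES3_EDE_KEY_SIZE key field even for single DES, and AES decryption
 * additionally triggers gen_rev_aes_key() above.
 */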
static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
                        int key_len)
{
        u8 *cinfo;
        u32 cipher_cfg;
        u32 keylen_cfg = 0;
        struct ix_sa_dir *dir;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        cinfo = dir->npe_ctx;

        if (encrypt) {
                cipher_cfg = cipher_cfg_enc(tfm);
                dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
        } else {
                cipher_cfg = cipher_cfg_dec(tfm);
        }
        if (cipher_cfg & MOD_AES) {
                switch (key_len) {
                case 16:
                        keylen_cfg = MOD_AES128;
                        break;
                case 24:
                        keylen_cfg = MOD_AES192;
                        break;
                case 32:
                        keylen_cfg = MOD_AES256;
                        break;
                default:
                        return -EINVAL;
                }
                cipher_cfg |= keylen_cfg;
        } else {
                err = crypto_des_verify_key(tfm, key);
                if (err)
                        return err;
        }
        /* write cfg word to cryptinfo */
        *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
        cinfo += sizeof(cipher_cfg);

        /* write cipher key to cryptinfo */
        memcpy(cinfo, key, key_len);
        /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
        if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
                memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
                key_len = DES3_EDE_KEY_SIZE;
        }
        dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
        dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
        if ((cipher_cfg & MOD_AES) && !encrypt)
                return gen_rev_aes_key(tfm);

        return 0;
}

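/*
 * Map a scatterlist into a chain of buffer_desc elements. The caller
 * passes a throw-away "hook" descriptor on its stack: its next and
 * phys_next fields end up pointing at the real head of the chain, and
 * the return value is the last element (or NULL on allocation failure,
 * in which case the partial chain still hangs off the hook and must be
 * freed by the caller).
 */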
static struct buffer_desc *chainup_buffers(struct device *dev,
                struct scatterlist *sg, unsigned int nbytes,
                struct buffer_desc *buf, gfp_t flags,
                enum dma_data_direction dir)
{
        for (; nbytes > 0; sg = sg_next(sg)) {
                unsigned int len = min(nbytes, sg->length);
                struct buffer_desc *next_buf;
                dma_addr_t next_buf_phys;
                void *ptr;

                nbytes -= len;
                ptr = sg_virt(sg);
                next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
                if (!next_buf) {
                        /* terminate the partial chain before bailing out */
                        buf->next = NULL;
                        buf->phys_next = 0;
                        return NULL;
                }
                sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
                buf->next = next_buf;
                buf->phys_next = next_buf_phys;
                buf = next_buf;

                buf->phys_addr = sg_dma_address(sg);
                buf->buf_len = len;
                buf->dir = dir;
        }
        buf->next = NULL;
        buf->phys_next = 0;
        return buf;
}

static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
                       unsigned int key_len)
{
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        init_completion(&ctx->completion);
        atomic_inc(&ctx->configuring);

        reset_sa_dir(&ctx->encrypt);
        reset_sa_dir(&ctx->decrypt);

        ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
        ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

        ret = setup_cipher(&tfm->base, 0, key, key_len);
        if (ret)
                goto out;
        ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
        if (!atomic_dec_and_test(&ctx->configuring))
                wait_for_completion(&ctx->completion);
        if (ret)
                return ret;
        crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int key_len)
{
        return verify_skcipher_des3_key(tfm, key) ?:
               ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
                               unsigned int key_len)
{
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

        /* the last CTR_RFC3686_NONCE_SIZE bytes of the key are the nonce */
        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;

        memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        key_len -= CTR_RFC3686_NONCE_SIZE;
        return ablk_setkey(tfm, key, key_len);
}

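/*
 * Software fallback path. The hardware path is only exercised for
 * requests whose source and destination each occupy a single
 * scatterlist entry (see the sg_nents() check in ablk_perform());
 * anything more fragmented is handed to the fallback skcipher
 * allocated in init_tfm_ablk().
 */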
static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
        struct ablk_ctx *rctx = skcipher_request_ctx(areq);
        int err;

        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        if (encrypt)
                err = crypto_skcipher_encrypt(&rctx->fallback_req);
        else
                err = crypto_skcipher_decrypt(&rctx->fallback_req);
        return err;
}

static int ablk_perform(struct skcipher_request *req, int encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct ix_sa_dir *dir;
        struct crypt_ctl *crypt;
        unsigned int nbytes = req->cryptlen;
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
        struct buffer_desc src_hook;
        struct device *dev = &pdev->dev;
        unsigned int offset;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;

        if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
                return ixp4xx_cipher_fallback(req, encrypt);

        if (qmgr_stat_full(send_qid))
                return -EAGAIN;
        if (atomic_read(&ctx->configuring))
                return -EAGAIN;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        req_ctx->encrypt = encrypt;

        crypt = get_crypt_desc();
        if (!crypt)
                return -ENOMEM;

        crypt->data.ablk_req = req;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->mode = dir->npe_mode;
        crypt->init_len = dir->npe_ctx_idx;

        crypt->crypt_offs = 0;
        crypt->crypt_len = nbytes;

        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);
        if (ivsize > 0 && !encrypt) {
                offset = req->cryptlen - ivsize;
                scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
        }
        if (req->src != req->dst) {
                struct buffer_desc dst_hook;

                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                /* This was never tested by Intel
                 * for more than one dst buffer, I think. */
                req_ctx->dst = NULL;
                if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                                     flags, DMA_FROM_DEVICE))
                        goto free_buf_dest;
                src_direction = DMA_TO_DEVICE;
                req_ctx->dst = dst_hook.next;
                crypt->dst_buf = dst_hook.phys_next;
        } else {
                req_ctx->dst = NULL;
        }
        req_ctx->src = NULL;
        if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
                             src_direction))
                goto free_buf_src;

        req_ctx->src = src_hook.next;
        crypt->src_buf = src_hook.phys_next;
        crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return -EINPROGRESS;

free_buf_src:
        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
        if (req->src != req->dst)
                free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
}

static int ablk_encrypt(struct skcipher_request *req)
{
        return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
        return ablk_perform(req, 0);
}

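/*
 * RFC 3686 CTR: build the 16-byte counter block as
 *   nonce (4 bytes, from setkey) | IV (8 bytes, per request) |
 *   counter (4 bytes, big endian, starting at 1)
 * and run a normal CTR "encrypt". CTR decryption is the same
 * operation, which is why both callbacks point here.
 */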
static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
        u8 iv[CTR_RFC3686_BLOCK_SIZE];
        u8 *info = req->iv;
        int ret;

        /* set up counter block */
        memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
        memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

        /* initialize counter portion of counter block */
        *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
                cpu_to_be32(1);

        req->iv = iv;
        ret = ablk_perform(req, 1);
        req->iv = info;
        return ret;
}

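/*
 * Core AEAD path: a single NPE operation both hashes assoclen +
 * cryptlen bytes and en/decrypts the payload region. icv_rev_aes tells
 * the NPE where to write (encrypt) or read (decrypt) the ICV; when the
 * ICV straddles scatterlist elements, a DMA-pool bounce buffer is used
 * instead and finish_scattered_hmac() patches things up on completion.
 */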
static int aead_perform(struct aead_request *req, int encrypt,
                        int cryptoffset, int eff_cryptlen, u8 *iv)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int ivsize = crypto_aead_ivsize(tfm);
        unsigned int authsize = crypto_aead_authsize(tfm);
        struct ix_sa_dir *dir;
        struct crypt_ctl *crypt;
        unsigned int cryptlen;
        struct buffer_desc *buf, src_hook;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
        struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        unsigned int lastlen;

        if (qmgr_stat_full(send_qid))
                return -EAGAIN;
        if (atomic_read(&ctx->configuring))
                return -EAGAIN;

        if (encrypt) {
                dir = &ctx->encrypt;
                cryptlen = req->cryptlen;
        } else {
                dir = &ctx->decrypt;
                /* req->cryptlen includes the authsize when decrypting */
                cryptlen = req->cryptlen - authsize;
                eff_cryptlen -= authsize;
        }
        crypt = get_crypt_desc();
        if (!crypt)
                return -ENOMEM;

        crypt->data.aead_req = req;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->mode = dir->npe_mode;
        crypt->init_len = dir->npe_ctx_idx;

        crypt->crypt_offs = cryptoffset;
        crypt->crypt_len = eff_cryptlen;

        crypt->auth_offs = 0;
        crypt->auth_len = req->assoclen + cryptlen;
        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);

        buf = chainup_buffers(dev, req->src, crypt->auth_len,
                              &src_hook, flags, src_direction);
        req_ctx->src = src_hook.next;
        crypt->src_buf = src_hook.phys_next;
        if (!buf)
                goto free_buf_src;

        lastlen = buf->buf_len;
        if (lastlen >= authsize)
                crypt->icv_rev_aes = buf->phys_addr +
                                     buf->buf_len - authsize;

        req_ctx->dst = NULL;

        if (req->src != req->dst) {
                struct buffer_desc dst_hook;

                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                src_direction = DMA_TO_DEVICE;

                buf = chainup_buffers(dev, req->dst, crypt->auth_len,
                                      &dst_hook, flags, DMA_FROM_DEVICE);
                req_ctx->dst = dst_hook.next;
                crypt->dst_buf = dst_hook.phys_next;

                if (!buf)
                        goto free_buf_dst;

                if (encrypt) {
                        lastlen = buf->buf_len;
                        if (lastlen >= authsize)
                                crypt->icv_rev_aes = buf->phys_addr +
                                                     buf->buf_len - authsize;
                }
        }

        if (unlikely(lastlen < authsize)) {
                /* The ICV is scattered across sg elements,
                 * we need to copy it into a safe buffer */
                req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
                                                    &crypt->icv_rev_aes);
                if (unlikely(!req_ctx->hmac_virt))
                        goto free_buf_dst;
                if (!encrypt) {
                        scatterwalk_map_and_copy(req_ctx->hmac_virt,
                                                 req->src, cryptlen, authsize, 0);
                }
                req_ctx->encrypt = encrypt;
        } else {
                req_ctx->hmac_virt = NULL;
        }

        crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return -EINPROGRESS;

free_buf_dst:
        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
}


static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int digest_len = crypto_aead_maxauthsize(tfm);
        int ret;

        if (!ctx->enckey_len && !ctx->authkey_len)
                return 0;
        init_completion(&ctx->completion);
        atomic_inc(&ctx->configuring);

        reset_sa_dir(&ctx->encrypt);
        reset_sa_dir(&ctx->decrypt);

        ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
        if (ret)
                goto out;
        ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
        if (ret)
                goto out;
        ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
                         ctx->authkey_len, digest_len);
        if (ret)
                goto out;
        ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
                         ctx->authkey_len, digest_len);
out:
        if (!atomic_dec_and_test(&ctx->configuring))
                wait_for_completion(&ctx->completion);
        return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        int max = crypto_aead_maxauthsize(tfm) >> 2;

        if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
                return -EINVAL;
        return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
                       unsigned int keylen)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        if (keys.authkeylen > sizeof(ctx->authkey))
                goto badkey;

        if (keys.enckeylen > sizeof(ctx->enckey))
                goto badkey;

        memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
        memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
        ctx->authkey_len = keys.authkeylen;
        ctx->enckey_len = keys.enckeylen;

        memzero_explicit(&keys, sizeof(keys));
        return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (unlikely(err))
                goto badkey;

        err = -EINVAL;
        if (keys.authkeylen > sizeof(ctx->authkey))
                goto badkey;

        err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
        if (err)
                goto badkey;

        memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
        memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
        ctx->authkey_len = keys.authkeylen;
        ctx->enckey_len = keys.enckeylen;

        memzero_explicit(&keys, sizeof(keys));
        return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
        memzero_explicit(&keys, sizeof(keys));
        return err;
}

static int aead_encrypt(struct aead_request *req)
{
        return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
        return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
        .crypto = {
                .base.cra_name          = "cbc(des)",
                .base.cra_blocksize     = DES_BLOCK_SIZE,

                .min_keysize            = DES_KEY_SIZE,
                .max_keysize            = DES_KEY_SIZE,
                .ivsize                 = DES_BLOCK_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
        .crypto = {
                .base.cra_name          = "ecb(des)",
                .base.cra_blocksize     = DES_BLOCK_SIZE,
                .min_keysize            = DES_KEY_SIZE,
                .max_keysize            = DES_KEY_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
        .crypto = {
                .base.cra_name          = "cbc(des3_ede)",
                .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,

                .min_keysize            = DES3_EDE_KEY_SIZE,
                .max_keysize            = DES3_EDE_KEY_SIZE,
                .ivsize                 = DES3_EDE_BLOCK_SIZE,
                .setkey                 = ablk_des3_setkey,
        },
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base.cra_name          = "ecb(des3_ede)",
                .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,

                .min_keysize            = DES3_EDE_KEY_SIZE,
                .max_keysize            = DES3_EDE_KEY_SIZE,
                .setkey                 = ablk_des3_setkey,
        },
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
        .crypto = {
                .base.cra_name          = "cbc(aes)",
                .base.cra_blocksize     = AES_BLOCK_SIZE,

                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
        .crypto = {
                .base.cra_name          = "ecb(aes)",
                .base.cra_blocksize     = AES_BLOCK_SIZE,

                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
        .crypto = {
                .base.cra_name          = "ctr(aes)",
                .base.cra_blocksize     = 1,

                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
        .crypto = {
                .base.cra_name          = "rfc3686(ctr(aes))",
                .base.cra_blocksize     = 1,

                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
                .setkey                 = ablk_rfc3686_setkey,
                .encrypt                = ablk_rfc3686_crypt,
                .decrypt                = ablk_rfc3686_crypt,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(md5),cbc(des))",
                        .cra_blocksize  = DES_BLOCK_SIZE,
                },
                .ivsize         = DES_BLOCK_SIZE,
                .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
                        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
                },
                .ivsize         = DES3_EDE_BLOCK_SIZE,
                .maxauthsize    = MD5_DIGEST_SIZE,
                .setkey         = des3_aead_setkey,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(sha1),cbc(des))",
                        .cra_blocksize  = DES_BLOCK_SIZE,
                },
                .ivsize         = DES_BLOCK_SIZE,
                .maxauthsize    = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
                        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
                },
                .ivsize         = DES3_EDE_BLOCK_SIZE,
                .maxauthsize    = SHA1_DIGEST_SIZE,
                .setkey         = des3_aead_setkey,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(md5),cbc(aes))",
                        .cra_blocksize  = AES_BLOCK_SIZE,
                },
                .ivsize         = AES_BLOCK_SIZE,
                .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
        .crypto = {
                .base = {
                        .cra_name       = "authenc(hmac(sha1),cbc(aes))",
                        .cra_blocksize  = AES_BLOCK_SIZE,
                },
                .ivsize         = AES_BLOCK_SIZE,
                .maxauthsize    = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static int ixp_crypto_probe(struct platform_device *_pdev)
{
        struct device *dev = &_pdev->dev;
        int num = ARRAY_SIZE(ixp4xx_algos);
        int i, err;

        pdev = _pdev;

        err = init_ixp_crypto(dev);
        if (err)
                return err;

        for (i = 0; i < num; i++) {
                struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

                if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s"IXP_POSTFIX, cra->base.cra_name) >=
                             CRYPTO_MAX_ALG_NAME)
                        continue;
                if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
                        continue;

                /* block ciphers */
                cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                      CRYPTO_ALG_ASYNC |
                                      CRYPTO_ALG_ALLOCATES_MEMORY |
                                      CRYPTO_ALG_NEED_FALLBACK;
                if (!cra->setkey)
                        cra->setkey = ablk_setkey;
                if (!cra->encrypt)
                        cra->encrypt = ablk_encrypt;
                if (!cra->decrypt)
                        cra->decrypt = ablk_decrypt;
                cra->init = init_tfm_ablk;
                cra->exit = exit_tfm_ablk;

                cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
                cra->base.cra_module = THIS_MODULE;
                cra->base.cra_alignmask = 3;
                cra->base.cra_priority = 300;
                if (crypto_register_skcipher(cra))
                        dev_err(&pdev->dev, "Failed to register '%s'\n",
                                cra->base.cra_name);
                else
                        ixp4xx_algos[i].registered = 1;
        }

        for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
                struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

                if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s"IXP_POSTFIX, cra->base.cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        continue;
                if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
                        continue;

                /* authenc */
                cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                      CRYPTO_ALG_ASYNC |
                                      CRYPTO_ALG_ALLOCATES_MEMORY;
                cra->setkey = cra->setkey ?: aead_setkey;
                cra->setauthsize = aead_setauthsize;
                cra->encrypt = aead_encrypt;
                cra->decrypt = aead_decrypt;
                cra->init = init_tfm_aead;
                cra->exit = exit_tfm_aead;

                cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
                cra->base.cra_module = THIS_MODULE;
                cra->base.cra_alignmask = 3;
                cra->base.cra_priority = 300;

                if (crypto_register_aead(cra))
                        dev_err(&pdev->dev, "Failed to register '%s'\n",
                                cra->base.cra_driver_name);
                else
                        ixp4xx_aeads[i].registered = 1;
        }
        return 0;
}

static int ixp_crypto_remove(struct platform_device *pdev)
{
        int num = ARRAY_SIZE(ixp4xx_algos);
        int i;

        for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
                if (ixp4xx_aeads[i].registered)
                        crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
        }

        for (i = 0; i < num; i++) {
                if (ixp4xx_algos[i].registered)
                        crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
        }
        release_ixp_crypto(&pdev->dev);

        return 0;
}

static const struct of_device_id ixp4xx_crypto_of_match[] = {
        {
                .compatible = "intel,ixp4xx-crypto",
        },
        {},
};

static struct platform_driver ixp_crypto_driver = {
        .probe = ixp_crypto_probe,
        .remove = ixp_crypto_remove,
        .driver = {
                .name = "ixp4xx_crypto",
                .of_match_table = ixp4xx_crypto_of_match,
        },
};
module_platform_driver(ixp_crypto_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");