/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

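/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */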
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9

#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8

#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400

#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)

#define MAX_IVLEN 16
#define NPE_ID 2
#define NPE_QLEN 16
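/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */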
#define NPE_QLEN_TOTAL 64

#define SEND_QID 29
#define RECV_QID 30

#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f

#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE 16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;
#endif
	u8 iv[MAX_IVLEN];
	u32 icv_rev_aes;
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;
	u16 auth_len;
	u16 crypt_offs;
	u16 crypt_len;
#else
	u16 auth_len;
	u16 auth_offs;
	u16 crypt_len;
	u16 crypt_offs;
#endif
	u32 aadAddr;
	u32 crypto_ctx;

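	/* Used by Host: 4*4 bytes */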
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
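	/* used when the hmac is not on one sg entry */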
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword = 0xAA010004,
	.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
	       "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword = 0x00000005,
	.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
	       "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_zalloc_coherent(dev,
					 NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					 &crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
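/* Descriptors NPE_QLEN..NPE_QLEN_TOTAL-1 are reserved for control
 * operations (HMAC pad hashing, reverse AES key generation), so key
 * setup can still make progress when the regular pool is exhausted.
 */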
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf, u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				     IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
		       npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
		       npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
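	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */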
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
				      sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
				   NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
				  NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

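	/* write cfg word to cryptinfo */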
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

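	/* write ICV to cryptinfo */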
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				    + sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
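	/* write cfg word to cryptinfo */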
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

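	/* write cipher key to cryptinfo */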
	memcpy(cinfo, key, key_len);
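	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */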
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	u32 flags = crypto_ablkcipher_get_flags(tfm);
	int err;

	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(tfm, flags);
		return err;
	}

	return ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

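	/* the nonce is stored in bytes at end of key */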
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
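		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */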
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

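	/* set up counter block */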
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

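	/* initialize counter portion of counter block */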
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
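		/* req->cryptlen includes the authsize when decrypting */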
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
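		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */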
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(tfm);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, flags);
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto = {
		.cra_name = "cbc(des)",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.geniv = "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto = {
		.cra_name = "ecb(des)",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.geniv = "eseqiv",
			.setkey = ablk_des3_setkey,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "ecb(des3_ede)",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.setkey = ablk_des3_setkey,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "cbc(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.geniv = "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name = "ecb(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto = {
		.cra_name = "ctr(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.geniv = "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name = "rfc3686(ctr(aes))",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.geniv = "eseqiv",
			.setkey = ablk_rfc3686_setkey,
			.encrypt = ablk_rfc3686_crypt,
			.decrypt = ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des))",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
		.setkey = des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.setkey = des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize = AES_BLOCK_SIZE,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize = AES_BLOCK_SIZE,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name = DRIVER_NAME,
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}

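		/* block ciphers */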
		cra->cra_type = &crypto_ablkcipher_type;
		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				 CRYPTO_ALG_KERN_DRIVER_ONLY |
				 CRYPTO_ALG_ASYNC;
		if (!cra->cra_ablkcipher.setkey)
			cra->cra_ablkcipher.setkey = ablk_setkey;
		if (!cra->cra_ablkcipher.encrypt)
			cra->cra_ablkcipher.encrypt = ablk_encrypt;
		if (!cra->cra_ablkcipher.decrypt)
			cra->cra_ablkcipher.decrypt = ablk_decrypt;
		cra->cra_init = init_tfm_ablk;

		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;
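		/* authenc */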
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");