/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9

#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8

#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400

#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)

#define MAX_IVLEN 16
#define NPE_ID 2 /* NPE C */
#define NPE_QLEN 16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID 29
#define RECV_QID 30

#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f

#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE 16

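/*
 * Hardware buffer descriptor, chained via phys_next to describe one
 * scatterlist entry per descriptor to the NPE.  The #ifdef __ARMEB__
 * swaps of the 16-bit fields presumably keep them in the byte order the
 * big-endian NPE expects.  The trailing next/dir members are host-side
 * bookkeeping only: the virtual chain pointer and the DMA direction used
 * when the chain is unmapped again in free_buf_chain().
 */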
struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE operation */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE operation */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	dma_addr_t src_buf;
	dma_addr_t dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};
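/*
 * setup_crypt_desc() below asserts sizeof(struct crypt_ctl) == 64, so the
 * descriptors can live in one flat DMA-coherent array and be converted
 * between virtual and physical addresses with simple index arithmetic in
 * crypt_virt2phys()/crypt_phys2virt().
 */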

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword = 0xAA010004,
	.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
	       "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword = 0x00000005,
	.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
	       "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* The pool must also cover the emergency descriptors handed out by
	 * get_crypt_desc_emerg(), hence NPE_QLEN_TOTAL entries. */
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

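/*
 * Descriptor allocation: slots 0..NPE_QLEN-1 are handed out by
 * get_crypt_desc() for regular requests, slots NPE_QLEN..NPE_QLEN_TOTAL-1
 * are reserved for get_crypt_desc_emerg(), which is only used for key setup
 * operations (HMAC pad hashing, reverse AES key generation).  A slot is free
 * when its ctl_flags is CTL_FLAG_UNUSED; the completion path in one_packet()
 * marks it unused again.
 */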
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* unmap the data buffer, then release the descriptor itself */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

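/*
 * When the ICV did not fit into a single scatterlist entry, aead_perform()
 * redirects it into a small DMA-coherent bounce buffer (req_ctx->hmac_virt).
 * On completion of an encryption request the computed ICV is copied from
 * that bounce buffer back into the destination scatterlist here; for
 * decryption the expected ICV was already copied in before submission.
 */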
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
					 req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

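/*
 * Completion handler for one RECV_QID entry.  The queue manager returns the
 * physical address of the crypt_ctl descriptor; the low address bit flags a
 * failed operation (e.g. ICV mismatch) and is mapped to -EBADMSG before the
 * low bits are masked off again.
 */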
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				     IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
		       npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
		       npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}

	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
				      sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
				   NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
				  NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
	}
}

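/*
 * Each transform keeps one ix_sa_dir per direction.  npe_ctx is an
 * NPE_CTX_LEN byte buffer from ctx_pool that accumulates the NPE "crypto
 * param" block (config word, cipher key, hash ICVs); npe_ctx_idx tracks how
 * much of it has been filled and is later handed to the NPE as init_len.
 */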
static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

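/*
 * HMAC precomputation: the key is XORed with the ipad/opad constant and the
 * NPE is asked (NPE_OP_HASH_GEN_ICV) to hash that single block and store the
 * resulting intermediate digest at "target" inside the per-direction
 * context.  setup_auth() calls this twice, once for the inner and once for
 * the outer pad; completion is signalled asynchronously through the
 * CTL_FLAG_GEN_ICV case in one_packet().
 */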
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags for LE */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
		  + sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
				 init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
				  init_len, npe_ctx_addr, key, key_len);
}

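/*
 * AES decryption needs the "reverse" (decryption) key schedule.
 * NPE_OP_ENC_GEN_KEY apparently asks the NPE to derive it itself: the
 * CIPH_ENCR bit is set temporarily in the decrypt context and the result is
 * written right after the config word (icv_rev_aes).  The CTL_FLAG_GEN_REVAES
 * completion handler in one_packet() clears CIPH_ENCR again.
 */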
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
			const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

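/*
 * Build the hardware buffer chain for a scatterlist.  "buf" is a dummy head
 * (usually a struct on the caller's stack); its next/phys_next members end
 * up pointing at the first real descriptor, and the function returns the
 * last descriptor of the chain, or NULL if a pool allocation failed (the
 * partially built chain is then freed by the caller via free_buf_chain()).
 */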
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	u32 flags = crypto_ablkcipher_get_flags(tfm);
	int err;

	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(tfm, flags);
		return err;
	}

	return ablk_setkey(tfm, key, key_len);
}


static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

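/*
 * Queue one ablkcipher request to the NPE.  Requests are rejected with
 * -EAGAIN while the send queue is full or while a setkey operation is still
 * in flight (ctx->configuring); otherwise the source (and, for out-of-place
 * operation, destination) scatterlists are chained up, the descriptor is
 * pushed to SEND_QID and -EINPROGRESS is returned.
 */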
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				     flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
			     flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

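/*
 * RFC 3686 CTR mode: the counter block handed to the NPE is built from the
 * 4-byte nonce saved at setkey time, the 8-byte per-request IV and a 32-bit
 * block counter initialised to 1, as required by the RFC.
 */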
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

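/*
 * Queue one AEAD request.  The NPE authenticates assoclen + cryptlen bytes
 * starting at offset 0 and en/decrypts eff_cryptlen bytes starting at
 * cryptoffset.  icv_rev_aes is pointed at the ICV location; if the ICV is
 * not contiguous in the last buffer of the chain it is bounced through a
 * small buffer_pool allocation (req_ctx->hmac_virt) instead, see
 * finish_scattered_hmac().
 */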
static int aead_perform(struct aead_request *req, int encrypt,
			int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
						    &crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
						 req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(tfm);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, flags);
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

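/*
 * Algorithm table.  cfg_enc/cfg_dec are the NPE config words for the two
 * directions; entries that do not provide their own setkey/encrypt/decrypt
 * handlers get the generic ablk_* ones filled in by ixp_module_init().
 */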
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto = {
		.cra_name = "cbc(des)",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto = {
		.cra_name = "ecb(des)",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = ablk_des3_setkey,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "ecb(des3_ede)",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.setkey = ablk_des3_setkey,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "cbc(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name = "ecb(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto = {
		.cra_name = "ctr(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name = "rfc3686(ctr(aes))",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_rfc3686_setkey,
			.encrypt = ablk_rfc3686_crypt,
			.decrypt = ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des))",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
		.setkey = des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.setkey = des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize = AES_BLOCK_SIZE,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize = AES_BLOCK_SIZE,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name = DRIVER_NAME,
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}

		/* block ciphers */
		cra->cra_type = &crypto_ablkcipher_type;
		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				 CRYPTO_ALG_KERN_DRIVER_ONLY |
				 CRYPTO_ALG_ASYNC;
		if (!cra->cra_ablkcipher.setkey)
			cra->cra_ablkcipher.setkey = ablk_setkey;
		if (!cra->cra_ablkcipher.encrypt)
			cra->cra_ablkcipher.encrypt = ablk_encrypt;
		if (!cra->cra_ablkcipher.decrypt)
			cra->cra_ablkcipher.decrypt = ablk_decrypt;
		cra->cra_init = init_tfm_ablk;

		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");