// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * Support for Atmel DES/TDES HW acceleration.
 *
 * Author: Nicolas Royer - Eukréa Electromatique
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "atmel-tdes-regs.h"

#define ATMEL_TDES_PRIORITY	300

/* TDES flags: mode bits reuse the Mode Register layout, plus software status bits */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)

#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

#define TDES_FLAGS_INIT		BIT(3)
#define TDES_FLAGS_FAST		BIT(4)
#define TDES_FLAGS_BUSY		BIT(5)
#define TDES_FLAGS_DMA		BIT(6)

#define ATMEL_TDES_QUEUE_LENGTH	50

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4

struct atmel_tdes_caps {
	bool has_dma;
	u32 has_cfb_3keys;
};

struct atmel_tdes_dev;

struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;

	int keylen;
	u32 key[DES3_EDE_KEY_SIZE / sizeof(u32)];
	unsigned long flags;

	u16 block_size;
};

struct atmel_tdes_reqctx {
	unsigned long mode;
	u8 lastc[DES_BLOCK_SIZE];
};

struct atmel_tdes_dma {
	struct dma_chan *chan;
	struct dma_slave_config dma_conf;
};

struct atmel_tdes_dev {
	struct list_head list;
	unsigned long phys_base;
	void __iomem *io_base;

	struct atmel_tdes_ctx *ctx;
	struct device *dev;
	struct clk *iclk;
	int irq;

	unsigned long flags;

	spinlock_t lock;
	struct crypto_queue queue;

	struct tasklet_struct done_task;
	struct tasklet_struct queue_task;

	struct skcipher_request *req;
	size_t total;

	struct scatterlist *in_sg;
	unsigned int nb_in_sg;
	size_t in_offset;
	struct scatterlist *out_sg;
	unsigned int nb_out_sg;
	size_t out_offset;

	size_t buflen;
	size_t dma_size;

	void *buf_in;
	int dma_in;
	dma_addr_t dma_addr_in;
	struct atmel_tdes_dma dma_lch_in;

	void *buf_out;
	int dma_out;
	dma_addr_t dma_addr_out;
	struct atmel_tdes_dma dma_lch_out;

	struct atmel_tdes_caps caps;

	u32 hw_version;
};

struct atmel_tdes_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};

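/*
 * Copy data between a scatterlist and a linear bounce buffer, at most
 * @buflen bytes and at most @total bytes overall, advancing *sg and
 * *offset as scatterlist entries are consumed. @out selects the
 * direction: 0 copies scatterlist data into @buf, 1 copies @buf back
 * into the scatterlist. Returns the number of bytes copied.
 */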
static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
			      void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
				    u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
			       const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_tdes_write(dd, offset, *value);
}

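/* Return the first registered TDES device, or NULL if none has probed yet. */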
static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
{
	struct atmel_tdes_dev *tdes_dd;

	spin_lock_bh(&atmel_tdes.lock);

	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
					   struct atmel_tdes_dev, list);
	spin_unlock_bh(&atmel_tdes.lock);
	return tdes_dd;
}

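/*
 * Enable the peripheral clock and, on first use, soft-reset the TDES IP
 * so it starts from a known state.
 */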
static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & TDES_FLAGS_INIT)) {
		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
		dd->flags |= TDES_FLAGS_INIT;
	}

	return 0;
}

static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
{
	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}

static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_tdes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);

	return 0;
}

static void atmel_tdes_dma_callback(void *data)
{
	struct atmel_tdes_dev *dd = data;

	/* dma_lch_out transfer completed */
	tasklet_schedule(&dd->done_task);
}

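/*
 * Program the hardware for the current request: select DES vs TDES and
 * the key mode from the key length, set the operation mode and direction
 * bits, then load the key and (for non-ECB modes) the IV.
 */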
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
				 TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* The MR register must be written before the IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
			   dd->ctx->keylen >> 2);

	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}

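/*
 * Stop the PDC transfer and collect the results: unmap the scatterlists
 * in the fast (zero-copy) path, or copy the output bounce buffer back
 * into the destination scatterlist otherwise.
 */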
static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy the output data back into the destination scatterlist */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
					   dd->buf_out, dd->buflen,
					   dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			dev_dbg(dd->dev, "not all data converted: %zu\n",
				count);
		}
	}

	return err;
}

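/*
 * Allocate one page for each of the input and output bounce buffers and
 * map them for DMA. They are used whenever the request scatterlists are
 * not suitably aligned for the fast path.
 */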
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_dbg(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* map the bounce buffers for DMA */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					 dd->buflen, DMA_TO_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_in);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					  dd->buflen, DMA_FROM_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_out);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	return err;
}

static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

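/*
 * Start a transfer through the built-in PDC (Peripheral DMA Controller):
 * program the transmit/receive pointer and counter registers, enable the
 * end-of-receive interrupt and start both directions.
 */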
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	int len32;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		len32 = DIV_ROUND_UP(length, sizeof(u8));
		break;

	case TDES_FLAGS_CFB16:
		len32 = DIV_ROUND_UP(length, sizeof(u16));
		break;

	default:
		len32 = DIV_ROUND_UP(length, sizeof(u32));
		break;
	}

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable the end-of-receive interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start the PDC transfers */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}

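/*
 * Start a transfer through the external DMA controller: pick the bus
 * width matching the CFB block size, configure both slave channels and
 * submit one descriptor per direction. Completion is signalled by the
 * "out" channel callback.
 */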
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor *in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;

	case TDES_FLAGS_CFB16:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;

	default:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
					  1, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
					   1, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

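/*
 * Start processing the next chunk of the current request. Aligned,
 * equally sized scatterlist entries are mapped directly (fast path);
 * anything else goes through the bounce buffers.
 */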
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check the alignment of the source and destination */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
		     IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
		      IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
		count = min_t(size_t, count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= TDES_FLAGS_FAST;
	} else {
		/* use the cache (bounce) buffers */
		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
					   dd->buf_in, dd->buflen,
					   dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~TDES_FLAGS_FAST;
	}

	dd->total -= count;

	if (dd->caps.has_dma)
		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
	else
		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);

	if (err && (dd->flags & TDES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

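/*
 * Update req->iv with the last ciphertext block of the request, as the
 * skcipher API expects for chaining modes; for in-place decryption the
 * block saved earlier in rctx->lastc is used.
 */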
static void
atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & TDES_FLAGS_ENCRYPT) {
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	} else {
		if (req->src == req->dst)
			memcpy(req->iv, rctx->lastc, ivsize);
		else
			scatterwalk_map_and_copy(req->iv, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}
}

static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	req->base.complete(&req->base, err);
}

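/*
 * Enqueue the request (if any) and, unless the engine is already busy,
 * dequeue the next one and start it. Called from process context via
 * atmel_tdes_crypt() and from the queue tasklet.
 */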
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
				   struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	if (dd->flags & TDES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign the new request to the device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* the done tasklet will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
						   dd->dma_size,
						   DMA_FROM_DEVICE);

			/* copy the output data back into the destination scatterlist */
			count = atmel_tdes_sg_copy(&dd->out_sg,
						   &dd->out_offset,
						   dd->buf_out, dd->buflen,
						   dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				dev_dbg(dd->dev, "not all data converted: %zu\n",
					count);
			}
		}
	}
	return err;
}

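/*
 * Common entry point for all modes: validate that the request length is
 * a whole number of blocks, record the mode, save the last ciphertext
 * block for in-place decryption, and hand the request to the queue.
 */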
static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct device *dev = ctx->dd->dev;

	if (!req->cryptlen)
		return 0;

	switch (mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB16:
		if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB32:
		if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	default:
		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of DES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = DES_BLOCK_SIZE;
		break;
	}

	rctx->mode = mode;

	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
	    !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_tdes_handle_queue(ctx->dd, req);
}

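/*
 * Request the "tx" and "rx" slave channels and preset their
 * configuration to target the TDES input/output data registers.
 */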
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
	int ret;

	/* Try to grab the two DMA channels */
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		ret = PTR_ERR(dd->dma_lch_in.chan);
		goto err_dma_in;
	}

	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		TDES_IDATA1R;
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_out.chan)) {
		ret = PTR_ERR(dd->dma_lch_out.chan);
		goto err_dma_out;
	}

	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		TDES_ODATA1R;
	dd->dma_lch_out.dma_conf.src_maxburst = 1;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}

static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}

static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}

static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}

static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}

static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}

static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}

static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}

static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dd = atmel_tdes_dev_alloc();
	if (!ctx->dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));

	return 0;
}

static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
	alg->base.cra_module = THIS_MODULE;

	alg->init = atmel_tdes_init_tfm;
}

static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name = "ecb(des)",
	.base.cra_driver_name = "atmel-ecb-des",
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_ecb_encrypt,
	.decrypt = atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name = "cbc(des)",
	.base.cra_driver_name = "atmel-cbc-des",
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_cbc_encrypt,
	.decrypt = atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name = "cfb(des)",
	.base.cra_driver_name = "atmel-cfb-des",
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_cfb_encrypt,
	.decrypt = atmel_tdes_cfb_decrypt,
},
{
	.base.cra_name = "cfb8(des)",
	.base.cra_driver_name = "atmel-cfb8-des",
	.base.cra_blocksize = CFB8_BLOCK_SIZE,
	.base.cra_alignmask = 0,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_cfb8_encrypt,
	.decrypt = atmel_tdes_cfb8_decrypt,
},
{
	.base.cra_name = "cfb16(des)",
	.base.cra_driver_name = "atmel-cfb16-des",
	.base.cra_blocksize = CFB16_BLOCK_SIZE,
	.base.cra_alignmask = 0x1,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_cfb16_encrypt,
	.decrypt = atmel_tdes_cfb16_decrypt,
},
{
	.base.cra_name = "cfb32(des)",
	.base.cra_driver_name = "atmel-cfb32-des",
	.base.cra_blocksize = CFB32_BLOCK_SIZE,
	.base.cra_alignmask = 0x3,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_cfb32_encrypt,
	.decrypt = atmel_tdes_cfb32_decrypt,
},
{
	.base.cra_name = "ofb(des)",
	.base.cra_driver_name = "atmel-ofb-des",
	.base.cra_blocksize = 1,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_ofb_encrypt,
	.decrypt = atmel_tdes_ofb_decrypt,
},
{
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "atmel-ecb-tdes",
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.setkey = atmel_tdes_setkey,
	.encrypt = atmel_tdes_ecb_encrypt,
	.decrypt = atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "atmel-cbc-tdes",
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.setkey = atmel_tdes_setkey,
	.encrypt = atmel_tdes_cbc_encrypt,
	.decrypt = atmel_tdes_cbc_decrypt,
	.ivsize = DES_BLOCK_SIZE,
},
{
	.base.cra_name = "ofb(des3_ede)",
	.base.cra_driver_name = "atmel-ofb-tdes",
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.setkey = atmel_tdes_setkey,
	.encrypt = atmel_tdes_ofb_encrypt,
	.decrypt = atmel_tdes_ofb_decrypt,
	.ivsize = DES_BLOCK_SIZE,
},
};

static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}

static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return;
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}

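/*
 * Interrupt handler: acknowledge (disable) the pending interrupts and let
 * the done tasklet finish the request outside of hard-IRQ context.
 */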
static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
		crypto_unregister_skcipher(&tdes_algs[i]);
}

static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);

		err = crypto_register_skcipher(&tdes_algs[i]);
		if (err)
			goto err_tdes_algs;
	}

	return 0;

err_tdes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&tdes_algs[j]);

	return err;
}

static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
	/* default (minimum) capabilities */
	dd->caps.has_dma = 0;
	dd->caps.has_cfb_3keys = 0;

	/* keep only the major version number */
	switch (dd->hw_version & 0xf00) {
	case 0x800:
	case 0x700:
		dd->caps.has_dma = 1;
		dd->caps.has_cfb_3keys = 1;
		break;
	case 0x600:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged tdes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
#endif

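/*
 * Probe: map the registers, grab the IRQ and peripheral clock, read the
 * hardware version to derive the capabilities, set up the bounce buffers
 * and (if available) the DMA channels, then register the algorithms.
 */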
static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
		     (unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
		     (unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	/* Get the base address */
	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!tdes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err_tasklet_kill;
	}
	tdes_dd->phys_base = tdes_res->start;

	/* Get the IRQ */
	tdes_dd->irq = platform_get_irq(pdev, 0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initialize the peripheral clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}

	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
			 dma_chan_name(tdes_dd->dma_lch_in.chan),
			 dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}

static int atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;

	tdes_dd = platform_get_drvdata(pdev);
	if (!tdes_dd)
		return -ENODEV;

	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);

	return 0;
}

static struct platform_driver atmel_tdes_driver = {
	.probe = atmel_tdes_probe,
	.remove = atmel_tdes_remove,
	.driver = {
		.name = "atmel_tdes",
		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");