/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer - Eukréa Electromatique
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

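/* AES flags */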
#define AES_FLAGS_MODE_MASK	0x03ff
#define AES_FLAGS_ENCRYPT	BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CFB		BIT(2)
#define AES_FLAGS_CFB8		BIT(3)
#define AES_FLAGS_CFB16		BIT(4)
#define AES_FLAGS_CFB32		BIT(5)
#define AES_FLAGS_CFB64		BIT(6)
#define AES_FLAGS_CFB128	BIT(7)
#define AES_FLAGS_OFB		BIT(8)
#define AES_FLAGS_CTR		BIT(9)

#define AES_FLAGS_INIT		BIT(16)
#define AES_FLAGS_DMA		BIT(17)
#define AES_FLAGS_BUSY		BIT(18)
#define AES_FLAGS_FAST		BIT(19)

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD	16

struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	u32	max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
	struct atmel_aes_dev	*dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];

	u16		block_size;
};

struct atmel_aes_reqctx {
	unsigned long	mode;
};

struct atmel_aes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_aes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t	bufcnt;
	size_t	buflen;
	size_t	dma_size;

	void	*buf_in;
	int	dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_aes_dma	dma_lch_in;

	void	*buf_out;
	int	dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_aes_dma	dma_lch_out;

	struct atmel_aes_caps	caps;

	u32	hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

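/*
 * Count how many scatterlist entries are needed to cover the request
 * payload (req->nbytes), stopping early if the list runs out.
 */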
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	unsigned int len;
	int sg_nb = 0;
	struct scatterlist *sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

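/*
 * Copy at most buflen bytes between the scatterlist and a linear buffer;
 * 'out' selects the direction (0: sg -> buf, 1: buf -> sg). The sg cursor
 * and offset are advanced so the caller can resume where it stopped.
 * Returns the number of bytes actually copied.
 */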
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}

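/*
 * Enable the peripheral clock and, on first use, soft-reset the engine and
 * program the fixed CKEY pattern (0xE) into the mode register before any
 * other configuration is written.
 */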
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	clk_prepare_enable(dd->iclk);

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	tasklet_schedule(&dd->done_task);
}

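/*
 * Program both DMA channels for one chunk: the FIFO access width follows
 * the CFB block size (1, 2 or 4 bytes), and burst lengths are capped at 1
 * for the narrow CFB modes. The out channel's completion callback schedules
 * the done tasklet.
 */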
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor *in_desc, *out_desc;

	dd->dma_size = length;

	if (!(dd->flags & AES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

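/*
 * PIO fallback for small requests: bounce the payload into buf_in, enable
 * the data-ready interrupt and feed the input registers; the IRQ handler
 * and done tasklet read the result back.
 */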
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);
	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}

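/*
 * Start one DMA chunk. The "fast" path maps the request's own scatterlist
 * entries directly (zero copy) when both sides are word aligned, a whole
 * number of blocks long and of equal length; otherwise the chunk is staged
 * through the pre-mapped bounce buffers.
 */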
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= AES_FLAGS_FAST;

	} else {
		/* slow path: stage the chunk through the bounce buffer */
		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~AES_FLAGS_FAST;
	}

	dd->total -= count;

	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

	if (err && (dd->flags & AES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

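/*
 * Program the mode register: key size, operating mode (ECB/CBC/CFBx/OFB/CTR),
 * direction and automatic vs. DMA start mode, then load the key and, for the
 * chained modes, the IV.
 */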
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}

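/*
 * Queue entry point shared by the request path and the queue tasklet:
 * enqueue the new request (if any), then, when the engine is idle, dequeue
 * the next one, configure the hardware and kick off a CPU or DMA transfer.
 */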
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

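	/* assign new request to device */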
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* the done tasklet will not run, so finish the request here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

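/*
 * Tear down after a DMA chunk: unmap the fast-path scatterlists, or sync the
 * bounce buffer and copy the produced output back into the request's
 * destination scatterlist.
 */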
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			/* the CPU reads the bounce buffer, so sync for CPU */
			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}

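/*
 * Allocate one page per direction as bounce buffers and map them for DMA;
 * buflen is rounded down to a whole number of AES blocks.
 */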
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	/* free_page() is a no-op on a NULL (zero) address */
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

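/*
 * Common entry for all modes: check that the request length is a whole
 * number of blocks for the selected block size, record the mode in the
 * request context and hand the request to the device queue.
 */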
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (mode & AES_FLAGS_CFB8) {
		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB16) {
		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB32) {
		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB64) {
		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB64 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB64_BLOCK_SIZE;
	} else {
		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of AES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = AES_BLOCK_SIZE;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, req);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

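/*
 * Request the two DMA channels, by name via DT ("tx"/"rx") or through the
 * platform-data filter, and preset their slave configuration towards the
 * AES input and output data registers.
 */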
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			      struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan)
		goto err_dma_in;

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		AES_IDATAR(0);
	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
	if (!dd->dma_lch_out.chan)
		goto err_dma_out;

	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		AES_ODATAR(0);
	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};

static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

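/*
 * Completion tasklet: for PIO transfers, read the output registers back into
 * the request; for DMA, unwind the current chunk and start the next one
 * until dd->total is consumed, then complete the request and run the queue.
 */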
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return;
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}

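/*
 * Shared interrupt handler: mask the sources that fired and defer the actual
 * work to the done tasklet.
 */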
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

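	/* keep only major version number */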
	switch (dd->hw_version & 0xff0) {
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			"Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	unsigned long aes_phys_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;
	aes_phys_size = resource_size(aes_res);

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto aes_irq_err;
	}

	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
			aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto aes_irq_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto clk_err;
	}

	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
	if (!aes_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto aes_io_err;
	}

	atmel_aes_hw_version_init(aes_dd);

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->dma_lch_in.chan),
			dma_chan_name(aes_dd->dma_lch_out.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
	iounmap(aes_dd->io_base);
aes_io_err:
	clk_put(aes_dd->iclk);
clk_err:
	free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
	kfree(aes_dd);
	aes_dd = NULL;
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	iounmap(aes_dd->io_base);

	clk_put(aes_dd->iclk);

	if (aes_dd->irq > 0)
		free_irq(aes_dd->irq, aes_dd);

	kfree(aes_dd);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");