// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos SSS (Security SubSystem)
// hardware acceleration: AES in ECB/CBC mode and MD5/SHA1/SHA256 hashing.
12#include <linux/clk.h>
13#include <linux/crypto.h>
14#include <linux/dma-mapping.h>
15#include <linux/err.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/platform_device.h>
24#include <linux/scatterlist.h>
25
26#include <crypto/ctr.h>
27#include <crypto/aes.h>
28#include <crypto/algapi.h>
29#include <crypto/scatterwalk.h>
30
31#include <crypto/hash.h>
32#include <crypto/md5.h>
33#include <crypto/sha.h>
34#include <crypto/internal/hash.h>
35
36#define _SBF(s, v) ((v) << (s))
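/*
 * Example (illustrative, derived from the defines below, not from a
 * datasheet): SSS_AES_KEY_SIZE_256 is _SBF(4, 0x02), i.e. 0x02 << 4 == 0x20,
 * so it programs the two-bit key-size field starting at bit 4 of
 * SSS_REG_AES_CONTROL.
 */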
37
38
39#define SSS_REG_FCINTSTAT 0x0000
40#define SSS_FCINTSTAT_HPARTINT BIT(7)
41#define SSS_FCINTSTAT_HDONEINT BIT(5)
42#define SSS_FCINTSTAT_BRDMAINT BIT(3)
43#define SSS_FCINTSTAT_BTDMAINT BIT(2)
44#define SSS_FCINTSTAT_HRDMAINT BIT(1)
45#define SSS_FCINTSTAT_PKDMAINT BIT(0)
46
47#define SSS_REG_FCINTENSET 0x0004
48#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
49#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
50#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
51#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
52#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
53#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
54
55#define SSS_REG_FCINTENCLR 0x0008
56#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
57#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
58#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
59#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
60#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
61#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
62
63#define SSS_REG_FCINTPEND 0x000C
64#define SSS_FCINTPEND_HPARTINTP BIT(7)
65#define SSS_FCINTPEND_HDONEINTP BIT(5)
66#define SSS_FCINTPEND_BRDMAINTP BIT(3)
67#define SSS_FCINTPEND_BTDMAINTP BIT(2)
68#define SSS_FCINTPEND_HRDMAINTP BIT(1)
69#define SSS_FCINTPEND_PKDMAINTP BIT(0)
70
71#define SSS_REG_FCFIFOSTAT 0x0010
72#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
73#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
74#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
75#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
76#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
77#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
78#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
79#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
80
81#define SSS_REG_FCFIFOCTRL 0x0014
82#define SSS_FCFIFOCTRL_DESSEL BIT(2)
83#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
84#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
85#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
86#define SSS_HASHIN_MASK _SBF(0, 0x03)
87
88#define SSS_REG_FCBRDMAS 0x0020
89#define SSS_REG_FCBRDMAL 0x0024
90#define SSS_REG_FCBRDMAC 0x0028
91#define SSS_FCBRDMAC_BYTESWAP BIT(1)
92#define SSS_FCBRDMAC_FLUSH BIT(0)
93
94#define SSS_REG_FCBTDMAS 0x0030
95#define SSS_REG_FCBTDMAL 0x0034
96#define SSS_REG_FCBTDMAC 0x0038
97#define SSS_FCBTDMAC_BYTESWAP BIT(1)
98#define SSS_FCBTDMAC_FLUSH BIT(0)
99
100#define SSS_REG_FCHRDMAS 0x0040
101#define SSS_REG_FCHRDMAL 0x0044
102#define SSS_REG_FCHRDMAC 0x0048
103#define SSS_FCHRDMAC_BYTESWAP BIT(1)
104#define SSS_FCHRDMAC_FLUSH BIT(0)
105
106#define SSS_REG_FCPKDMAS 0x0050
107#define SSS_REG_FCPKDMAL 0x0054
108#define SSS_REG_FCPKDMAC 0x0058
109#define SSS_FCPKDMAC_BYTESWAP BIT(3)
110#define SSS_FCPKDMAC_DESCEND BIT(2)
111#define SSS_FCPKDMAC_TRANSMIT BIT(1)
112#define SSS_FCPKDMAC_FLUSH BIT(0)
113
114#define SSS_REG_FCPKDMAO 0x005C
115
116
117#define SSS_REG_AES_CONTROL 0x00
118#define SSS_AES_BYTESWAP_DI BIT(11)
119#define SSS_AES_BYTESWAP_DO BIT(10)
120#define SSS_AES_BYTESWAP_IV BIT(9)
121#define SSS_AES_BYTESWAP_CNT BIT(8)
122#define SSS_AES_BYTESWAP_KEY BIT(7)
123#define SSS_AES_KEY_CHANGE_MODE BIT(6)
124#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
125#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
126#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
127#define SSS_AES_FIFO_MODE BIT(3)
128#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
129#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
130#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
131#define SSS_AES_MODE_DECRYPT BIT(0)
132
133#define SSS_REG_AES_STATUS 0x04
134#define SSS_AES_BUSY BIT(2)
135#define SSS_AES_INPUT_READY BIT(1)
136#define SSS_AES_OUTPUT_READY BIT(0)
137
138#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
139#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
140#define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2))
141#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
142#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
143
144#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
145#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
146#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
147
148#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
149#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
150 SSS_AES_REG(dev, reg))
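/*
 * Illustrative expansion of the accessor macros above: a call such as
 * SSS_WRITE(dev, FCINTPEND, st_bits) becomes
 *
 *	__raw_writel(st_bits, dev->ioaddr + SSS_REG_FCINTPEND);
 *
 * and SSS_AES_WRITE(dev, AES_CONTROL, val) likewise targets
 * dev->aes_ioaddr + SSS_REG_AES_CONTROL.
 */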
151
152
153#define FLAGS_AES_DECRYPT BIT(0)
154#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
155#define FLAGS_AES_CBC _SBF(1, 0x01)
156#define FLAGS_AES_CTR _SBF(1, 0x02)
157
158#define AES_KEY_LEN 16
159#define CRYPTO_QUEUE_LEN 1
160
161
162#define SSS_REG_HASH_CTRL 0x00
163
164#define SSS_HASH_USER_IV_EN BIT(5)
165#define SSS_HASH_INIT_BIT BIT(4)
166#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
167#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
168#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
169
170#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
171
172#define SSS_REG_HASH_CTRL_PAUSE 0x04
173
174#define SSS_HASH_PAUSE BIT(0)
175
176#define SSS_REG_HASH_CTRL_FIFO 0x08
177
178#define SSS_HASH_FIFO_MODE_DMA BIT(0)
179#define SSS_HASH_FIFO_MODE_CPU 0
180
181#define SSS_REG_HASH_CTRL_SWAP 0x0C
182
183#define SSS_HASH_BYTESWAP_DI BIT(3)
184#define SSS_HASH_BYTESWAP_DO BIT(2)
185#define SSS_HASH_BYTESWAP_IV BIT(1)
186#define SSS_HASH_BYTESWAP_KEY BIT(0)
187
188#define SSS_REG_HASH_STATUS 0x10
189
190#define SSS_HASH_STATUS_MSG_DONE BIT(6)
191#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
192#define SSS_HASH_STATUS_BUFFER_READY BIT(0)
193
194#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
195#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
196
197#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
198#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
199
200#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
201#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
202
203#define HASH_BLOCK_SIZE 64
204#define HASH_REG_SIZEOF 4
205#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
206#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
207#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
208
/*
 * HASH bit numbers: bit indices in dev->hash_flags, manipulated with
 * set_bit()/clear_bit()/test_bit() to track the state of the hash engine
 * (busy, final, DMA active/ready, output ready) and whether scatterlists
 * were copied or allocated for the current request.
 */
215#define HASH_FLAGS_BUSY 0
216#define HASH_FLAGS_FINAL 1
217#define HASH_FLAGS_DMA_ACTIVE 2
218#define HASH_FLAGS_OUTPUT_READY 3
219#define HASH_FLAGS_DMA_READY 4
220#define HASH_FLAGS_SGS_COPIED 5
221#define HASH_FLAGS_SGS_ALLOCED 6
222
223
224#define BUFLEN HASH_BLOCK_SIZE
225
226#define SSS_HASH_DMA_LEN_ALIGN 8
227#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
228
229#define SSS_HASH_QUEUE_LENGTH 10
230
/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from the SSS module's base address
 * @hash_offset: HASH register offset from the SSS module's base address
 */
240struct samsung_aes_variant {
241 unsigned int aes_offset;
242 unsigned int hash_offset;
243};
244
245struct s5p_aes_reqctx {
246 unsigned long mode;
247};
248
249struct s5p_aes_ctx {
250 struct s5p_aes_dev *dev;
251
252 uint8_t aes_key[AES_MAX_KEY_SIZE];
253 uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
254 int keylen;
255};
/**
 * struct s5p_aes_dev - crypto device state container
 * @dev:	associated platform device
 * @clk:	clock ("secss") that gates access to the hardware
 * @ioaddr:	mapped IO memory region of the SSS block
 * @aes_ioaddr:	per-variant base of the AES registers
 * @irq_fc:	feed control interrupt line
 * @req:	crypto request currently handled by the device
 * @ctx:	configuration (key) for the currently handled request
 * @sg_src:	scatterlist entry currently DMA-mapped as AES input
 * @sg_dst:	scatterlist entry currently DMA-mapped as AES output
 * @sg_src_cpy:	aligned bounce copy of the source, if the original was not
 *		AES-block aligned
 * @sg_dst_cpy:	aligned bounce copy of the destination, if needed
 * @tasklet:	dequeues and starts the next AES request
 * @queue:	queue of pending AES requests
 * @busy:	true while the device owns req/ctx/sg_* for a request
 * @lock:	protects the hardware registers and the fields above
 * @res:	MMIO resource, extended when the hash block is used
 * @io_hash_base: per-variant base of the HASH registers
 * @hash_lock:	protects hash_req, hash_queue and hash_flags
 * @hash_flags:	HASH_FLAGS_* state bits for the current hash operation
 * @hash_queue:	queue of pending ahash requests
 * @hash_tasklet: finishes hash requests and dequeues the next one
 * @xmit_buf:	bounce buffer for the buffered head of a hash transfer
 * @hash_req:	ahash request currently handled by the device
 * @hash_sg_iter: next scatterlist entry to be fed to the hash DMA
 * @hash_sg_cnt: number of scatterlist entries left for the hash DMA
 * @use_hash:	true if the hash algorithms are registered
 */
296struct s5p_aes_dev {
297 struct device *dev;
298 struct clk *clk;
299 void __iomem *ioaddr;
300 void __iomem *aes_ioaddr;
301 int irq_fc;
302
303 struct ablkcipher_request *req;
304 struct s5p_aes_ctx *ctx;
305 struct scatterlist *sg_src;
306 struct scatterlist *sg_dst;
307
308 struct scatterlist *sg_src_cpy;
309 struct scatterlist *sg_dst_cpy;
310
311 struct tasklet_struct tasklet;
312 struct crypto_queue queue;
313 bool busy;
314 spinlock_t lock;
315
316 struct resource *res;
317 void __iomem *io_hash_base;
318
319 spinlock_t hash_lock;
320 unsigned long hash_flags;
321 struct crypto_queue hash_queue;
322 struct tasklet_struct hash_tasklet;
323
324 u8 xmit_buf[BUFLEN];
325 struct ahash_request *hash_req;
326 struct scatterlist *hash_sg_iter;
327 unsigned int hash_sg_cnt;
328
329 bool use_hash;
330};
331
/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd:		associated device
 * @op_update:	true for an update operation, false for final/digest
 * @digcnt:	number of bytes already processed by the hardware
 * @digest:	intermediate or final digest read back from the engine
 * @nregs:	number of 32-bit result registers for the chosen algorithm
 * @engine:	SSS_HASH_ENGINE_* selector written to the control register
 * @sg:		scatterlist currently used for the DMA transfer
 * @sg_len:	number of entries in @sg
 * @sgl:	local scatterlist used to prepend xmit_buf or a bounce buffer
 * @skip:	offset into req->src already consumed into @buffer
 * @total:	number of bytes to be processed in the current operation
 * @finup:	true when the request should finalize the digest
 * @error:	set when an earlier step of this request failed
 * @bufcnt:	number of bytes held in @buffer
 * @buffer:	tail bytes of req->src kept for the next update/final
 */
350struct s5p_hash_reqctx {
351 struct s5p_aes_dev *dd;
352 bool op_update;
353
354 u64 digcnt;
355 u8 digest[SHA256_DIGEST_SIZE];
356
357 unsigned int nregs;
358 u32 engine;
359
360 struct scatterlist *sg;
361 unsigned int sg_len;
362 struct scatterlist sgl[2];
363 unsigned int skip;
364 unsigned int total;
365 bool finup;
366 bool error;
367
368 u32 bufcnt;
369 u8 buffer[0];
370};
/**
 * struct s5p_hash_ctx - hash transformation context
 * @dd:		associated device
 * @flags:	transformation flags
 * @fallback:	software shash used for short messages (less than BUFLEN)
 */
378struct s5p_hash_ctx {
379 struct s5p_aes_dev *dd;
380 unsigned long flags;
381 struct crypto_shash *fallback;
382};
383
384static const struct samsung_aes_variant s5p_aes_data = {
385 .aes_offset = 0x4000,
386 .hash_offset = 0x6000,
387};
388
389static const struct samsung_aes_variant exynos_aes_data = {
390 .aes_offset = 0x200,
391 .hash_offset = 0x400,
392};
393
394static const struct of_device_id s5p_sss_dt_match[] = {
395 {
396 .compatible = "samsung,s5pv210-secss",
397 .data = &s5p_aes_data,
398 },
399 {
400 .compatible = "samsung,exynos4210-secss",
401 .data = &exynos_aes_data,
402 },
403 { },
404};
405MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
406
static inline const struct samsung_aes_variant *
find_s5p_sss_version(const struct platform_device *pdev)
409{
410 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
411 const struct of_device_id *match;
412
413 match = of_match_node(s5p_sss_dt_match,
414 pdev->dev.of_node);
415 return (const struct samsung_aes_variant *)match->data;
416 }
417 return (const struct samsung_aes_variant *)
418 platform_get_device_id(pdev)->driver_data;
419}
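/*
 * Illustrative device tree node (a sketch, not copied from any board file
 * or binding document; address, interrupt and clock phandle are
 * placeholders):
 *
 *	sss: sss@10830000 {
 *		compatible = "samsung,exynos4210-secss";
 *		reg = <0x10830000 0x300>;
 *		interrupts = <...>;
 *		clocks = <&clock CLK_SSS>;
 *		clock-names = "secss";
 *	};
 *
 * The "secss" clock name matches the devm_clk_get() call in the probe
 * function below.
 */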
420
421static struct s5p_aes_dev *s5p_dev;
422
423static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
424 const struct scatterlist *sg)
425{
426 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
427 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
428}
429
430static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
431 const struct scatterlist *sg)
432{
433 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
434 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
435}
436
437static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
438{
439 int len;
440
441 if (!*sg)
442 return;
443
444 len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
445 free_pages((unsigned long)sg_virt(*sg), get_order(len));
446
447 kfree(*sg);
448 *sg = NULL;
449}
450
451static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
452 unsigned int nbytes, int out)
453{
454 struct scatter_walk walk;
455
456 if (!nbytes)
457 return;
458
459 scatterwalk_start(&walk, sg);
460 scatterwalk_copychunks(buf, &walk, nbytes, out);
461 scatterwalk_done(&walk, out, 0);
462}
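/*
 * Note on the @out parameter (derived from the call sites below): out == 0
 * copies @nbytes from the scatterlist into @buf (used when building an
 * aligned bounce buffer in s5p_make_sg_cpy()), while out == 1 copies from
 * @buf back into the scatterlist (used by s5p_sg_done() to return results
 * to the caller's destination).
 */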
463
464static void s5p_sg_done(struct s5p_aes_dev *dev)
465{
466 if (dev->sg_dst_cpy) {
467 dev_dbg(dev->dev,
468 "Copying %d bytes of output data back to original place\n",
469 dev->req->nbytes);
470 s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
471 dev->req->nbytes, 1);
472 }
473 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
474 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
475}
476
477
478static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
479{
480 dev->req->base.complete(&dev->req->base, err);
481}
482
483static void s5p_unset_outdata(struct s5p_aes_dev *dev)
484{
485 dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
486}
487
488static void s5p_unset_indata(struct s5p_aes_dev *dev)
489{
490 dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
491}
492
493static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
494 struct scatterlist **dst)
495{
496 void *pages;
497 int len;
498
499 *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
500 if (!*dst)
501 return -ENOMEM;
502
503 len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
504 pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
505 if (!pages) {
506 kfree(*dst);
507 *dst = NULL;
508 return -ENOMEM;
509 }
510
511 s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
512
513 sg_init_table(*dst, 1);
514 sg_set_buf(*dst, pages, len);
515
516 return 0;
517}
518
519static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
520{
521 int err;
522
523 if (!sg->length) {
524 err = -EINVAL;
525 goto exit;
526 }
527
528 err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
529 if (!err) {
530 err = -ENOMEM;
531 goto exit;
532 }
533
534 dev->sg_dst = sg;
535 err = 0;
536
537exit:
538 return err;
539}
540
541static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
542{
543 int err;
544
545 if (!sg->length) {
546 err = -EINVAL;
547 goto exit;
548 }
549
550 err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
551 if (!err) {
552 err = -ENOMEM;
553 goto exit;
554 }
555
556 dev->sg_src = sg;
557 err = 0;
558
559exit:
560 return err;
561}
562
/*
 * s5p_aes_tx() - handle completion of an AES output (transmit) DMA block
 *
 * Unmaps the current destination entry. Returns a negative errno if mapping
 * the next entry fails, 1 if a new output entry is ready and its
 * address/length must be programmed with s5p_set_dma_outdata(), or 0 if
 * there is no more output data.
 */
570static int s5p_aes_tx(struct s5p_aes_dev *dev)
571{
572 int ret = 0;
573
574 s5p_unset_outdata(dev);
575
576 if (!sg_is_last(dev->sg_dst)) {
577 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
578 if (!ret)
579 ret = 1;
580 }
581
582 return ret;
583}
584
/*
 * s5p_aes_rx() - handle completion of an AES input (receive) DMA block
 *
 * Unmaps the current source entry. Returns a negative errno if mapping the
 * next entry fails, 1 if a new input entry is ready and must be programmed
 * with s5p_set_dma_indata(), or 0 if there is no more input data.
 */
592static int s5p_aes_rx(struct s5p_aes_dev *dev)
593{
594 int ret = 0;
595
596 s5p_unset_indata(dev);
597
598 if (!sg_is_last(dev->sg_src)) {
599 ret = s5p_set_indata(dev, sg_next(dev->sg_src));
600 if (!ret)
601 ret = 1;
602 }
603
604 return ret;
605}
606
607static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
608{
609 return __raw_readl(dd->io_hash_base + offset);
610}
611
612static inline void s5p_hash_write(struct s5p_aes_dev *dd,
613 u32 offset, u32 value)
614{
615 __raw_writel(value, dd->io_hash_base + offset);
616}
617
/**
 * s5p_set_dma_hashdata() - start hash DMA for one scatterlist entry
 * @dev:	device
 * @sg:		DMA-mapped scatterlist entry to feed to the hash engine
 */
623static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
624 const struct scatterlist *sg)
625{
626 dev->hash_sg_cnt--;
627 SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
628 SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg));
629}
630
/**
 * s5p_hash_rx() - advance to the next hash DMA scatterlist entry
 * @dev:	device
 *
 * Return:
 * 1	if another entry is ready to be written to the device,
 * 0	if there is no more data and this was a FINAL operation,
 * 2	if there is no more data and this was an UPDATE operation
 *	(the engine must be paused until the next update arrives).
 */
640static int s5p_hash_rx(struct s5p_aes_dev *dev)
641{
642 if (dev->hash_sg_cnt > 0) {
643 dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
644 return 1;
645 }
646
647 set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
648 if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
649 return 0;
650
651 return 2;
652}
653
654static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
655{
656 struct platform_device *pdev = dev_id;
657 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
658 int err_dma_tx = 0;
659 int err_dma_rx = 0;
660 int err_dma_hx = 0;
661 bool tx_end = false;
662 bool hx_end = false;
663 unsigned long flags;
664 uint32_t status;
665 u32 st_bits;
666 int err;
667
668 spin_lock_irqsave(&dev->lock, flags);
669
	/*
	 * Read the feed control interrupt status and service every pending
	 * source: AES input DMA (BRDMA), AES output DMA (BTDMA) and hash
	 * input DMA (HRDMA). More than one bit may be set at the same time.
	 */
680 status = SSS_READ(dev, FCINTSTAT);
681 if (status & SSS_FCINTSTAT_BRDMAINT)
682 err_dma_rx = s5p_aes_rx(dev);
683
684 if (status & SSS_FCINTSTAT_BTDMAINT) {
685 if (sg_is_last(dev->sg_dst))
686 tx_end = true;
687 err_dma_tx = s5p_aes_tx(dev);
688 }
689
690 if (status & SSS_FCINTSTAT_HRDMAINT)
691 err_dma_hx = s5p_hash_rx(dev);
692
693 st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
694 SSS_FCINTSTAT_HRDMAINT);
695
696 SSS_WRITE(dev, FCINTPEND, st_bits);
697
698
699 if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
700
701 if (status & SSS_FCINTSTAT_HPARTINT)
702 st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
703
704 if (status & SSS_FCINTSTAT_HDONEINT)
705 st_bits = SSS_HASH_STATUS_MSG_DONE;
706
707 set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
708 s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
709 hx_end = true;
710
711 err_dma_hx = 0;
712 }
713
714 if (err_dma_rx < 0) {
715 err = err_dma_rx;
716 goto error;
717 }
718 if (err_dma_tx < 0) {
719 err = err_dma_tx;
720 goto error;
721 }
722
723 if (tx_end) {
724 s5p_sg_done(dev);
725 if (err_dma_hx == 1)
726 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
727
728 spin_unlock_irqrestore(&dev->lock, flags);
729
730 s5p_aes_complete(dev, 0);
731
732 tasklet_schedule(&dev->tasklet);
733 } else {
		/*
		 * Writing the DMA block length (for either direction) starts
		 * the transfer immediately, so program it last, after the
		 * pending interrupts have been acknowledged above.
		 */
740 if (err_dma_tx == 1)
741 s5p_set_dma_outdata(dev, dev->sg_dst);
742 if (err_dma_rx == 1)
743 s5p_set_dma_indata(dev, dev->sg_src);
744 if (err_dma_hx == 1)
745 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
746
747 spin_unlock_irqrestore(&dev->lock, flags);
748 }
749
750 goto hash_irq_end;
751
752error:
753 s5p_sg_done(dev);
754 dev->busy = false;
755 if (err_dma_hx == 1)
756 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
757
758 spin_unlock_irqrestore(&dev->lock, flags);
759 s5p_aes_complete(dev, err);
760
761hash_irq_end:
	/*
	 * If a partial or final digest is ready, let the hash tasklet finish
	 * the request; if the hash DMA merely ran out of data during an
	 * UPDATE operation (err_dma_hx == 2), pause the hash engine so it
	 * can resume with the next update.
	 */
767 if (hx_end)
768 tasklet_schedule(&dev->hash_tasklet);
769 else if (err_dma_hx == 2)
770 s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
771 SSS_HASH_PAUSE);
772
773 return IRQ_HANDLED;
774}
775
/**
 * s5p_hash_read_msg() - read the digest or intermediate state from hardware
 * @req:	AHASH request
 */
780static void s5p_hash_read_msg(struct ahash_request *req)
781{
782 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
783 struct s5p_aes_dev *dd = ctx->dd;
784 u32 *hash = (u32 *)ctx->digest;
785 unsigned int i;
786
787 for (i = 0; i < ctx->nregs; i++)
788 hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
789}
790
/**
 * s5p_hash_write_ctx_iv() - write the saved intermediate digest as IV
 * @dd:		device
 * @ctx:	request context holding the intermediate digest
 */
796static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
797 const struct s5p_hash_reqctx *ctx)
798{
799 const u32 *hash = (const u32 *)ctx->digest;
800 unsigned int i;
801
802 for (i = 0; i < ctx->nregs; i++)
803 s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
804}
805
806
807
808
809
810static void s5p_hash_write_iv(struct ahash_request *req)
811{
812 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
813
814 s5p_hash_write_ctx_iv(ctx->dd, ctx);
815}
816
817
818
819
820
821static void s5p_hash_copy_result(struct ahash_request *req)
822{
823 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
824
825 if (!req->result)
826 return;
827
828 memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
829}
830
831
832
833
834
835static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
836{
837 SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
838}
839
840
841
842
843
844
845
846static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
847{
848 s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
849}
850
851
852
853
854
855
856static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
857{
858 SSS_WRITE(dev, FCINTENCLR, flags);
859}
860
861
862
863
864
865
866static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
867{
868 SSS_WRITE(dev, FCINTENSET, flags);
869}
870
/**
 * s5p_hash_set_flow() - select the source of data for the hash engine
 * @dev:	device
 * @hashflow:	SSS_HASHIN_* value (independent, cipher input or output)
 */
876static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
877{
878 unsigned long flags;
879 u32 flow;
880
881 spin_lock_irqsave(&dev->lock, flags);
882
883 flow = SSS_READ(dev, FCFIFOCTRL);
884 flow &= ~SSS_HASHIN_MASK;
885 flow |= hashflow;
886 SSS_WRITE(dev, FCFIFOCTRL, flow);
887
888 spin_unlock_irqrestore(&dev->lock, flags);
889}
890
/**
 * s5p_ahash_dma_init() - (re)initialise the hash block for a DMA transfer
 * @dev:	device
 * @hashflow:	SSS_HASHIN_* input selection
 *
 * Disable and flush the hash DMA, switch the FIFO to DMA mode, select the
 * input flow and re-enable the hash interrupts.
 */
899static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
900{
901 s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
902 SSS_FCINTENCLR_HDONEINTENCLR |
903 SSS_FCINTENCLR_HPARTINTENCLR);
904 s5p_hash_dma_flush(dev);
905
906 s5p_hash_dma_enable(dev);
907 s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
908 s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
909 SSS_FCINTENSET_HDONEINTENSET |
910 SSS_FCINTENSET_HPARTINTENSET);
911}
912
/**
 * s5p_hash_write_ctrl() - prepare the hash engine for a transfer
 * @dd:		device
 * @length:	number of bytes to be hashed in this transfer
 * @final:	true if this transfer finalises the digest
 *
 * Restore the intermediate digest as IV when data has already been
 * processed, program the current message size and, for a final transfer,
 * the number of bits already processed (a non-final transfer is marked by
 * writing BIT(31) to the high message-size register), set the byte-swap
 * options and start the selected engine.
 */
927static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
928 bool final)
929{
930 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
931 u32 prelow, prehigh, low, high;
932 u32 configflags, swapflags;
933 u64 tmplen;
934
935 configflags = ctx->engine | SSS_HASH_INIT_BIT;
936
937 if (likely(ctx->digcnt)) {
938 s5p_hash_write_ctx_iv(dd, ctx);
939 configflags |= SSS_HASH_USER_IV_EN;
940 }
941
942 if (final) {
943
944 low = length;
945 high = 0;
946
947 tmplen = ctx->digcnt * 8;
948 prelow = (u32)tmplen;
949 prehigh = (u32)(tmplen >> 32);
950 } else {
951 prelow = 0;
952 prehigh = 0;
953 low = 0;
954 high = BIT(31);
955 }
956
957 swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
958 SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
959
960 s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
961 s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
962 s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
963 s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
964
965 s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
966 s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
967}
968
/**
 * s5p_hash_xmit_dma() - start a DMA transfer of ctx->sg to the hash engine
 * @dd:		device
 * @length:	number of bytes to hash in this transfer
 * @final:	true if this transfer finalises the digest
 *
 * Return: -EINPROGRESS when the transfer was started (completion is
 * signalled through the interrupt handler and hash tasklet), or -EINVAL if
 * the scatterlist could not be DMA-mapped.
 */
977static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
978 bool final)
979{
980 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
981 unsigned int cnt;
982
983 cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
984 if (!cnt) {
985 dev_err(dd->dev, "dma_map_sg error\n");
986 ctx->error = true;
987 return -EINVAL;
988 }
989
990 set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
991 dd->hash_sg_iter = ctx->sg;
992 dd->hash_sg_cnt = cnt;
993 s5p_hash_write_ctrl(dd, length, final);
994 ctx->digcnt += length;
995 ctx->total -= length;
996
997
998 if (final)
999 set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
1000
1001 s5p_set_dma_hashdata(dd, dd->hash_sg_iter);
1002
1003 return -EINPROGRESS;
1004}
1005
/**
 * s5p_hash_copy_sgs() - copy request data into one linear bounce buffer
 * @ctx:	request context
 * @sg:		source scatterlist
 * @new_len:	number of bytes to take from @sg (after ctx->skip)
 *
 * Used when the source scatterlist is not suitably aligned for DMA.
 * Allocates pages, copies the already buffered bytes followed by @new_len
 * bytes of @sg into them, and points ctx->sg at a single-entry list over
 * that buffer. The pages are freed in s5p_hash_finish_req().
 */
1018static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
1019 struct scatterlist *sg, unsigned int new_len)
1020{
1021 unsigned int pages, len;
1022 void *buf;
1023
1024 len = new_len + ctx->bufcnt;
1025 pages = get_order(len);
1026
1027 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
1028 if (!buf) {
		dev_err(ctx->dd->dev, "failed to allocate pages for unaligned request\n");
1030 ctx->error = true;
1031 return -ENOMEM;
1032 }
1033
1034 if (ctx->bufcnt)
1035 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
1036
1037 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
1038 new_len, 0);
1039 sg_init_table(ctx->sgl, 1);
1040 sg_set_buf(ctx->sgl, buf, len);
1041 ctx->sg = ctx->sgl;
1042 ctx->sg_len = 1;
1043 ctx->bufcnt = 0;
1044 ctx->skip = 0;
1045 set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
1046
1047 return 0;
1048}
1049
/**
 * s5p_hash_copy_sg_lists() - build a private scatterlist over the request
 * @ctx:	request context
 * @sg:		source scatterlist
 * @new_len:	number of bytes to take from @sg (after ctx->skip)
 *
 * Used when the data itself is aligned but the original list cannot be
 * handed to the hardware as-is. Allocates a new scatterlist, optionally
 * prepends the buffered bytes from xmit_buf, and copies page/offset/length
 * entries covering exactly @new_len bytes. The list is freed in
 * s5p_hash_finish_req().
 */
1064static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
1065 struct scatterlist *sg, unsigned int new_len)
1066{
1067 unsigned int skip = ctx->skip, n = sg_nents(sg);
1068 struct scatterlist *tmp;
1069 unsigned int len;
1070
1071 if (ctx->bufcnt)
1072 n++;
1073
1074 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
1075 if (!ctx->sg) {
1076 ctx->error = true;
1077 return -ENOMEM;
1078 }
1079
1080 sg_init_table(ctx->sg, n);
1081
1082 tmp = ctx->sg;
1083
1084 ctx->sg_len = 0;
1085
1086 if (ctx->bufcnt) {
1087 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
1088 tmp = sg_next(tmp);
1089 ctx->sg_len++;
1090 }
1091
1092 while (sg && skip >= sg->length) {
1093 skip -= sg->length;
1094 sg = sg_next(sg);
1095 }
1096
1097 while (sg && new_len) {
1098 len = sg->length - skip;
1099 if (new_len < len)
1100 len = new_len;
1101
1102 new_len -= len;
1103 sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
1104 skip = 0;
1105 if (new_len <= 0)
1106 sg_mark_end(tmp);
1107
1108 tmp = sg_next(tmp);
1109 ctx->sg_len++;
1110 sg = sg_next(sg);
1111 }
1112
1113 set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
1114
1115 return 0;
1116}
1117
/**
 * s5p_hash_prepare_sgs() - prepare the scatterlist for a DMA transfer
 * @ctx:	request context
 * @sg:		source scatterlist
 * @new_len:	number of bytes to hash from @sg
 * @final:	true if this transfer finalises the digest
 *
 * Walk @sg to check that every used entry is BUFLEN-aligned and that the
 * list ends where the transfer ends. Fall back to a linear bounce buffer
 * (s5p_hash_copy_sgs()) for unaligned data, or to a private copy of the
 * list (s5p_hash_copy_sg_lists()) when only the list layout is a problem;
 * otherwise use @sg directly, chaining xmit_buf in front of it when there
 * are buffered bytes to prepend.
 *
 * Return: 0 on success, -ENOMEM if a copy was needed and allocation failed.
 */
1134static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
1135 struct scatterlist *sg,
1136 unsigned int new_len, bool final)
1137{
1138 unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
1139 bool aligned = true, list_ok = true;
1140 struct scatterlist *sg_tmp = sg;
1141
1142 if (!sg || !sg->length || !new_len)
1143 return 0;
1144
1145 if (skip || !final)
1146 list_ok = false;
1147
1148 while (nbytes > 0 && sg_tmp) {
1149 n++;
1150 if (skip >= sg_tmp->length) {
1151 skip -= sg_tmp->length;
1152 if (!sg_tmp->length) {
1153 aligned = false;
1154 break;
1155 }
1156 } else {
1157 if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
1158 aligned = false;
1159 break;
1160 }
1161
1162 if (nbytes < sg_tmp->length - skip) {
1163 list_ok = false;
1164 break;
1165 }
1166
1167 nbytes -= sg_tmp->length - skip;
1168 skip = 0;
1169 }
1170
1171 sg_tmp = sg_next(sg_tmp);
1172 }
1173
1174 if (!aligned)
1175 return s5p_hash_copy_sgs(ctx, sg, new_len);
1176 else if (!list_ok)
1177 return s5p_hash_copy_sg_lists(ctx, sg, new_len);
1178
	/*
	 * Data and list are BUFLEN-aligned: use req->src directly, chaining
	 * the buffered bytes in xmit_buf in front of it when present.
	 */
1183 if (ctx->bufcnt) {
1184 ctx->sg_len = n;
1185 sg_init_table(ctx->sgl, 2);
1186 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
1187 sg_chain(ctx->sgl, 2, sg);
1188 ctx->sg = ctx->sgl;
1189 ctx->sg_len++;
1190 } else {
1191 ctx->sg = sg;
1192 ctx->sg_len = n;
1193 }
1194
1195 return 0;
1196}
1197
/**
 * s5p_hash_prepare_request() - prepare sg and buffers for the next transfer
 * @req:	AHASH request
 * @update:	true for an update operation (consume req->nbytes)
 *
 * Top up the internal buffer to a BUFLEN boundary, decide how many bytes
 * can be sent to the hardware now (always a multiple of BUFLEN unless the
 * request is final), stash the remainder in ctx->buffer for later, and set
 * up ctx->sg accordingly.
 */
1208static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
1209{
1210 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1211 bool final = ctx->finup;
1212 int xmit_len, hash_later, nbytes;
1213 int ret;
1214
1215 if (update)
1216 nbytes = req->nbytes;
1217 else
1218 nbytes = 0;
1219
1220 ctx->total = nbytes + ctx->bufcnt;
1221 if (!ctx->total)
1222 return 0;
1223
1224 if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
1225
1226 int len = BUFLEN - ctx->bufcnt % BUFLEN;
1227
1228 if (len > nbytes)
1229 len = nbytes;
1230
1231 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1232 0, len, 0);
1233 ctx->bufcnt += len;
1234 nbytes -= len;
1235 ctx->skip = len;
1236 } else {
1237 ctx->skip = 0;
1238 }
1239
1240 if (ctx->bufcnt)
1241 memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
1242
1243 xmit_len = ctx->total;
1244 if (final) {
1245 hash_later = 0;
1246 } else {
1247 if (IS_ALIGNED(xmit_len, BUFLEN))
1248 xmit_len -= BUFLEN;
1249 else
1250 xmit_len -= xmit_len & (BUFLEN - 1);
1251
1252 hash_later = ctx->total - xmit_len;
1253
1254
1255 scatterwalk_map_and_copy(ctx->buffer, req->src,
1256 req->nbytes - hash_later,
1257 hash_later, 0);
1258 }
1259
1260 if (xmit_len > BUFLEN) {
1261 ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
1262 final);
1263 if (ret)
1264 return ret;
1265 } else {
1266
1267 if (unlikely(!ctx->bufcnt)) {
1268
1269 scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
1270 0, xmit_len, 0);
1271 }
1272
1273 sg_init_table(ctx->sgl, 1);
1274 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
1275
1276 ctx->sg = ctx->sgl;
1277 ctx->sg_len = 1;
1278 }
1279
1280 ctx->bufcnt = hash_later;
1281 if (!final)
1282 ctx->total = xmit_len;
1283
1284 return 0;
1285}
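/*
 * Worked example for the function above (hypothetical numbers): an update
 * of 100 bytes with an empty buffer gives ctx->total = 100. Since the
 * request is not final, xmit_len is rounded down to 64 (one BUFLEN block)
 * and hash_later = 36; the first 64 bytes are hashed now via xmit_buf and
 * the trailing 36 bytes are kept in ctx->buffer for the next update/final.
 */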
/**
 * s5p_hash_update_dma_stop() - unmap the DMA scatterlist after a transfer
 * @dd:	device
 */
1293static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
1294{
1295 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
1296
1297 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
1298 clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
1299}
1300
1301
1302
1303
1304
1305static void s5p_hash_finish(struct ahash_request *req)
1306{
1307 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1308 struct s5p_aes_dev *dd = ctx->dd;
1309
1310 if (ctx->digcnt)
1311 s5p_hash_copy_result(req);
1312
1313 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
1314}
1315
/**
 * s5p_hash_finish_req() - finish the request and release its resources
 * @req:	AHASH request
 * @err:	error passed to the request's completion callback
 *
 * Free any copied scatterlist or bounce pages, read back the digest on
 * success, clear the busy/state flags and complete the request.
 */
1321static void s5p_hash_finish_req(struct ahash_request *req, int err)
1322{
1323 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1324 struct s5p_aes_dev *dd = ctx->dd;
1325 unsigned long flags;
1326
1327 if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
1328 free_pages((unsigned long)sg_virt(ctx->sg),
1329 get_order(ctx->sg->length));
1330
1331 if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
1332 kfree(ctx->sg);
1333
1334 ctx->sg = NULL;
1335 dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
1336 BIT(HASH_FLAGS_SGS_COPIED));
1337
1338 if (!err && !ctx->error) {
1339 s5p_hash_read_msg(req);
1340 if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
1341 s5p_hash_finish(req);
1342 } else {
1343 ctx->error = true;
1344 }
1345
1346 spin_lock_irqsave(&dd->hash_lock, flags);
1347 dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
1348 BIT(HASH_FLAGS_DMA_READY) |
1349 BIT(HASH_FLAGS_OUTPUT_READY));
1350 spin_unlock_irqrestore(&dd->hash_lock, flags);
1351
1352 if (req->base.complete)
1353 req->base.complete(&req->base, err);
1354}
1355
/**
 * s5p_hash_handle_queue() - enqueue a request and start the next one
 * @dd:		device
 * @req:	AHASH request to enqueue, or NULL to just process the queue
 *
 * If the hash engine is idle, dequeue the next request, prepare its data
 * and start the DMA transfer. Requests that need no hardware work, or that
 * fail during preparation, are completed synchronously and the queue is
 * processed again.
 *
 * Return: the enqueue status of @req (such as -EINPROGRESS or -EBUSY).
 */
1366static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
1367 struct ahash_request *req)
1368{
1369 struct crypto_async_request *async_req, *backlog;
1370 struct s5p_hash_reqctx *ctx;
1371 unsigned long flags;
1372 int err = 0, ret = 0;
1373
1374retry:
1375 spin_lock_irqsave(&dd->hash_lock, flags);
1376 if (req)
1377 ret = ahash_enqueue_request(&dd->hash_queue, req);
1378
1379 if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
1380 spin_unlock_irqrestore(&dd->hash_lock, flags);
1381 return ret;
1382 }
1383
1384 backlog = crypto_get_backlog(&dd->hash_queue);
1385 async_req = crypto_dequeue_request(&dd->hash_queue);
1386 if (async_req)
1387 set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
1388
1389 spin_unlock_irqrestore(&dd->hash_lock, flags);
1390
1391 if (!async_req)
1392 return ret;
1393
1394 if (backlog)
1395 backlog->complete(backlog, -EINPROGRESS);
1396
1397 req = ahash_request_cast(async_req);
1398 dd->hash_req = req;
1399 ctx = ahash_request_ctx(req);
1400
1401 err = s5p_hash_prepare_request(req, ctx->op_update);
1402 if (err || !ctx->total)
1403 goto out;
1404
1405 dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
1406 ctx->op_update, req->nbytes);
1407
1408 s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
1409 if (ctx->digcnt)
1410 s5p_hash_write_iv(req);
1411
1412 if (ctx->op_update) {
1413 err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
1414 if (err != -EINPROGRESS && ctx->finup && !ctx->error)
1415
1416 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1417 } else {
1418 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1419 }
1420out:
1421 if (err != -EINPROGRESS) {
1422
1423 s5p_hash_finish_req(req, err);
1424 req = NULL;
1425
1426
1427
1428
1429
1430 goto retry;
1431 }
1432
1433 return ret;
1434}
1435
/**
 * s5p_hash_tasklet_cb() - hash bottom half
 * @data:	pointer to the struct s5p_aes_dev, cast to unsigned long
 */
1440static void s5p_hash_tasklet_cb(unsigned long data)
1441{
1442 struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
1443
1444 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
1445 s5p_hash_handle_queue(dd, NULL);
1446 return;
1447 }
1448
1449 if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
1450 if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
1451 &dd->hash_flags)) {
1452 s5p_hash_update_dma_stop(dd);
1453 }
1454
1455 if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
1456 &dd->hash_flags)) {
1457
1458 clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
1459 goto finish;
1460 }
1461 }
1462
1463 return;
1464
1465finish:
1466
1467 s5p_hash_finish_req(dd->hash_req, 0);
1468
1469
1470 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
1471 s5p_hash_handle_queue(dd, NULL);
1472}
1473
/**
 * s5p_hash_enqueue() - enqueue an update or final request
 * @req:	AHASH request
 * @op:		true for update, false for final
 */
1481static int s5p_hash_enqueue(struct ahash_request *req, bool op)
1482{
1483 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1484 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1485
1486 ctx->op_update = op;
1487
1488 return s5p_hash_handle_queue(tctx->dd, req);
1489}
1490
/**
 * s5p_hash_update() - process more input data
 * @req:	AHASH request
 *
 * Data that still fits into the BUFLEN-sized internal buffer is only
 * copied there; anything larger is enqueued for the hardware.
 */
1500static int s5p_hash_update(struct ahash_request *req)
1501{
1502 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1503
1504 if (!req->nbytes)
1505 return 0;
1506
1507 if (ctx->bufcnt + req->nbytes <= BUFLEN) {
1508 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1509 0, req->nbytes, 0);
1510 ctx->bufcnt += req->nbytes;
1511 return 0;
1512 }
1513
1514 return s5p_hash_enqueue(req, true);
1515}
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
1526 const u8 *data, unsigned int len, u8 *out)
1527{
1528 SHASH_DESC_ON_STACK(shash, tfm);
1529
1530 shash->tfm = tfm;
1531 shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
1532
1533 return crypto_shash_digest(shash, data, len, out);
1534}
1535
1536
1537
1538
1539
1540static int s5p_hash_final_shash(struct ahash_request *req)
1541{
1542 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1543 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1544
1545 return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
1546 ctx->buffer, ctx->bufcnt, req->result);
1547}
1548
/**
 * s5p_hash_final() - close the hash and produce the digest
 * @req:	AHASH request with req->result set to the output buffer
 *
 * Marks the request as finishing. If an earlier step of this request
 * already failed, return -EINVAL without touching the hardware. If nothing
 * has been sent to the hardware yet and less than BUFLEN bytes are
 * buffered, compute the digest with the software fallback instead of
 * setting up a DMA transfer; otherwise enqueue a final operation.
 *
 * Return: 0 or a negative error, or -EINPROGRESS/-EBUSY when the request
 * was queued for asynchronous processing.
 */
1572static int s5p_hash_final(struct ahash_request *req)
1573{
1574 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1575
1576 ctx->finup = true;
1577 if (ctx->error)
1578 return -EINVAL;
1579
1580 if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
1581 return s5p_hash_final_shash(req);
1582
1583 return s5p_hash_enqueue(req, false);
1584}
1585
/**
 * s5p_hash_finup() - process the remaining data and produce the digest
 * @req:	AHASH request containing the last chunk of data
 */
1592static int s5p_hash_finup(struct ahash_request *req)
1593{
1594 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1595 int err1, err2;
1596
1597 ctx->finup = true;
1598
1599 err1 = s5p_hash_update(req);
1600 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1601 return err1;
1602
	/*
	 * final() is called even when update() failed (unless the update is
	 * still in flight), so that the request state is cleaned up and the
	 * fallback path can still produce a digest for short messages.
	 */
1608 err2 = s5p_hash_final(req);
1609
1610 return err1 ?: err2;
1611}
1612
/**
 * s5p_hash_init() - initialise the request context
 * @req:	AHASH request
 *
 * Selects the hardware engine and the number of result registers from the
 * digest size of the transformation.
 */
1619static int s5p_hash_init(struct ahash_request *req)
1620{
1621 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1622 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1623 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1624
1625 ctx->dd = tctx->dd;
1626 ctx->error = false;
1627 ctx->finup = false;
1628 ctx->bufcnt = 0;
1629 ctx->digcnt = 0;
1630 ctx->total = 0;
1631 ctx->skip = 0;
1632
1633 dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
1634 crypto_ahash_digestsize(tfm));
1635
1636 switch (crypto_ahash_digestsize(tfm)) {
1637 case MD5_DIGEST_SIZE:
1638 ctx->engine = SSS_HASH_ENGINE_MD5;
1639 ctx->nregs = HASH_MD5_MAX_REG;
1640 break;
1641 case SHA1_DIGEST_SIZE:
1642 ctx->engine = SSS_HASH_ENGINE_SHA1;
1643 ctx->nregs = HASH_SHA1_MAX_REG;
1644 break;
1645 case SHA256_DIGEST_SIZE:
1646 ctx->engine = SSS_HASH_ENGINE_SHA256;
1647 ctx->nregs = HASH_SHA256_MAX_REG;
1648 break;
1649 default:
1650 ctx->error = true;
1651 return -EINVAL;
1652 }
1653
1654 return 0;
1655}
1656
1657
1658
1659
1660
1661
1662
1663static int s5p_hash_digest(struct ahash_request *req)
1664{
1665 return s5p_hash_init(req) ?: s5p_hash_finup(req);
1666}
1667
/**
 * s5p_hash_cra_init_alg() - allocate the software fallback transformation
 * @tfm:	base crypto transformation
 */
1672static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
1673{
1674 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1675 const char *alg_name = crypto_tfm_alg_name(tfm);
1676
1677 tctx->dd = s5p_dev;
1678
1679 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1680 CRYPTO_ALG_NEED_FALLBACK);
1681 if (IS_ERR(tctx->fallback)) {
1682 pr_err("fallback alloc fails for '%s'\n", alg_name);
1683 return PTR_ERR(tctx->fallback);
1684 }
1685
1686 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1687 sizeof(struct s5p_hash_reqctx) + BUFLEN);
1688
1689 return 0;
1690}
1691
1692
1693
1694
1695
1696static int s5p_hash_cra_init(struct crypto_tfm *tfm)
1697{
1698 return s5p_hash_cra_init_alg(tfm);
1699}
1700
/**
 * s5p_hash_cra_exit() - free the software fallback transformation
 * @tfm:	base crypto transformation
 */
1707static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
1708{
1709 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1710
1711 crypto_free_shash(tctx->fallback);
1712 tctx->fallback = NULL;
1713}
1714
/**
 * s5p_hash_export() - export the request state (context plus buffer)
 * @req:	AHASH request
 * @out:	buffer of at least halg.statesize bytes
 */
1720static int s5p_hash_export(struct ahash_request *req, void *out)
1721{
1722 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1723
1724 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
1725
1726 return 0;
1727}
1728
/**
 * s5p_hash_import() - restore a previously exported request state
 * @req:	AHASH request
 * @in:		buffer previously filled by s5p_hash_export()
 */
1734static int s5p_hash_import(struct ahash_request *req, const void *in)
1735{
1736 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1737 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1738 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1739 const struct s5p_hash_reqctx *ctx_in = in;
1740
1741 memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
1742 if (ctx_in->bufcnt > BUFLEN) {
1743 ctx->error = true;
1744 return -EINVAL;
1745 }
1746
1747 ctx->dd = tctx->dd;
1748 ctx->error = false;
1749
1750 return 0;
1751}
1752
1753static struct ahash_alg algs_sha1_md5_sha256[] = {
1754{
1755 .init = s5p_hash_init,
1756 .update = s5p_hash_update,
1757 .final = s5p_hash_final,
1758 .finup = s5p_hash_finup,
1759 .digest = s5p_hash_digest,
1760 .export = s5p_hash_export,
1761 .import = s5p_hash_import,
1762 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1763 .halg.digestsize = SHA1_DIGEST_SIZE,
1764 .halg.base = {
1765 .cra_name = "sha1",
1766 .cra_driver_name = "exynos-sha1",
1767 .cra_priority = 100,
1768 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1769 CRYPTO_ALG_KERN_DRIVER_ONLY |
1770 CRYPTO_ALG_ASYNC |
1771 CRYPTO_ALG_NEED_FALLBACK,
1772 .cra_blocksize = HASH_BLOCK_SIZE,
1773 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1774 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1775 .cra_module = THIS_MODULE,
1776 .cra_init = s5p_hash_cra_init,
1777 .cra_exit = s5p_hash_cra_exit,
1778 }
1779},
1780{
1781 .init = s5p_hash_init,
1782 .update = s5p_hash_update,
1783 .final = s5p_hash_final,
1784 .finup = s5p_hash_finup,
1785 .digest = s5p_hash_digest,
1786 .export = s5p_hash_export,
1787 .import = s5p_hash_import,
1788 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1789 .halg.digestsize = MD5_DIGEST_SIZE,
1790 .halg.base = {
1791 .cra_name = "md5",
1792 .cra_driver_name = "exynos-md5",
1793 .cra_priority = 100,
1794 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1795 CRYPTO_ALG_KERN_DRIVER_ONLY |
1796 CRYPTO_ALG_ASYNC |
1797 CRYPTO_ALG_NEED_FALLBACK,
1798 .cra_blocksize = HASH_BLOCK_SIZE,
1799 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1800 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1801 .cra_module = THIS_MODULE,
1802 .cra_init = s5p_hash_cra_init,
1803 .cra_exit = s5p_hash_cra_exit,
1804 }
1805},
1806{
1807 .init = s5p_hash_init,
1808 .update = s5p_hash_update,
1809 .final = s5p_hash_final,
1810 .finup = s5p_hash_finup,
1811 .digest = s5p_hash_digest,
1812 .export = s5p_hash_export,
1813 .import = s5p_hash_import,
1814 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1815 .halg.digestsize = SHA256_DIGEST_SIZE,
1816 .halg.base = {
1817 .cra_name = "sha256",
1818 .cra_driver_name = "exynos-sha256",
1819 .cra_priority = 100,
1820 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1821 CRYPTO_ALG_KERN_DRIVER_ONLY |
1822 CRYPTO_ALG_ASYNC |
1823 CRYPTO_ALG_NEED_FALLBACK,
1824 .cra_blocksize = HASH_BLOCK_SIZE,
1825 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1826 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1827 .cra_module = THIS_MODULE,
1828 .cra_init = s5p_hash_cra_init,
1829 .cra_exit = s5p_hash_cra_exit,
1830 }
1831}
1832
1833};
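/*
 * Illustrative sketch (not part of this driver): another kernel user
 * reaches these implementations through the generic ahash API, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, &my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * my_done_cb, my_ctx, sg, digest and nbytes are placeholders; error
 * handling and waiting for -EINPROGRESS completion are omitted.
 */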
1834
1835static void s5p_set_aes(struct s5p_aes_dev *dev,
1836 const uint8_t *key, const uint8_t *iv,
1837 unsigned int keylen)
1838{
1839 void __iomem *keystart;
1840
1841 if (iv)
1842 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
1843
1844 if (keylen == AES_KEYSIZE_256)
1845 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
1846 else if (keylen == AES_KEYSIZE_192)
1847 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
1848 else
1849 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
1850
1851 memcpy_toio(keystart, key, keylen);
1852}
1853
1854static bool s5p_is_sg_aligned(struct scatterlist *sg)
1855{
1856 while (sg) {
1857 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
1858 return false;
1859 sg = sg_next(sg);
1860 }
1861
1862 return true;
1863}
1864
1865static int s5p_set_indata_start(struct s5p_aes_dev *dev,
1866 struct ablkcipher_request *req)
1867{
1868 struct scatterlist *sg;
1869 int err;
1870
1871 dev->sg_src_cpy = NULL;
1872 sg = req->src;
1873 if (!s5p_is_sg_aligned(sg)) {
1874 dev_dbg(dev->dev,
1875 "At least one unaligned source scatter list, making a copy\n");
1876 err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
1877 if (err)
1878 return err;
1879
1880 sg = dev->sg_src_cpy;
1881 }
1882
1883 err = s5p_set_indata(dev, sg);
1884 if (err) {
1885 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
1886 return err;
1887 }
1888
1889 return 0;
1890}
1891
1892static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1893 struct ablkcipher_request *req)
1894{
1895 struct scatterlist *sg;
1896 int err;
1897
1898 dev->sg_dst_cpy = NULL;
1899 sg = req->dst;
1900 if (!s5p_is_sg_aligned(sg)) {
1901 dev_dbg(dev->dev,
1902 "At least one unaligned dest scatter list, making a copy\n");
1903 err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
1904 if (err)
1905 return err;
1906
1907 sg = dev->sg_dst_cpy;
1908 }
1909
1910 err = s5p_set_outdata(dev, sg);
1911 if (err) {
1912 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
1913 return err;
1914 }
1915
1916 return 0;
1917}
1918
1919static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1920{
1921 struct ablkcipher_request *req = dev->req;
1922 uint32_t aes_control;
1923 unsigned long flags;
1924 int err;
1925 u8 *iv;
1926
1927 aes_control = SSS_AES_KEY_CHANGE_MODE;
1928 if (mode & FLAGS_AES_DECRYPT)
1929 aes_control |= SSS_AES_MODE_DECRYPT;
1930
1931 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
1932 aes_control |= SSS_AES_CHAIN_MODE_CBC;
1933 iv = req->info;
1934 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
1935 aes_control |= SSS_AES_CHAIN_MODE_CTR;
1936 iv = req->info;
1937 } else {
1938 iv = NULL;
1939 }
1940
1941 if (dev->ctx->keylen == AES_KEYSIZE_192)
1942 aes_control |= SSS_AES_KEY_SIZE_192;
1943 else if (dev->ctx->keylen == AES_KEYSIZE_256)
1944 aes_control |= SSS_AES_KEY_SIZE_256;
1945
1946 aes_control |= SSS_AES_FIFO_MODE;
1947
1948
1949 aes_control |= SSS_AES_BYTESWAP_DI
1950 | SSS_AES_BYTESWAP_DO
1951 | SSS_AES_BYTESWAP_IV
1952 | SSS_AES_BYTESWAP_KEY
1953 | SSS_AES_BYTESWAP_CNT;
1954
1955 spin_lock_irqsave(&dev->lock, flags);
1956
1957 SSS_WRITE(dev, FCINTENCLR,
1958 SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
1959 SSS_WRITE(dev, FCFIFOCTRL, 0x00);
1960
1961 err = s5p_set_indata_start(dev, req);
1962 if (err)
1963 goto indata_error;
1964
1965 err = s5p_set_outdata_start(dev, req);
1966 if (err)
1967 goto outdata_error;
1968
1969 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
1970 s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
1971
1972 s5p_set_dma_indata(dev, dev->sg_src);
1973 s5p_set_dma_outdata(dev, dev->sg_dst);
1974
1975 SSS_WRITE(dev, FCINTENSET,
1976 SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
1977
1978 spin_unlock_irqrestore(&dev->lock, flags);
1979
1980 return;
1981
1982outdata_error:
1983 s5p_unset_indata(dev);
1984
1985indata_error:
1986 s5p_sg_done(dev);
1987 dev->busy = false;
1988 spin_unlock_irqrestore(&dev->lock, flags);
1989 s5p_aes_complete(dev, err);
1990}
1991
1992static void s5p_tasklet_cb(unsigned long data)
1993{
1994 struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
1995 struct crypto_async_request *async_req, *backlog;
1996 struct s5p_aes_reqctx *reqctx;
1997 unsigned long flags;
1998
1999 spin_lock_irqsave(&dev->lock, flags);
2000 backlog = crypto_get_backlog(&dev->queue);
2001 async_req = crypto_dequeue_request(&dev->queue);
2002
2003 if (!async_req) {
2004 dev->busy = false;
2005 spin_unlock_irqrestore(&dev->lock, flags);
2006 return;
2007 }
2008 spin_unlock_irqrestore(&dev->lock, flags);
2009
2010 if (backlog)
2011 backlog->complete(backlog, -EINPROGRESS);
2012
2013 dev->req = ablkcipher_request_cast(async_req);
2014 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
2015 reqctx = ablkcipher_request_ctx(dev->req);
2016
2017 s5p_aes_crypt_start(dev, reqctx->mode);
2018}
2019
2020static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
2021 struct ablkcipher_request *req)
2022{
2023 unsigned long flags;
2024 int err;
2025
2026 spin_lock_irqsave(&dev->lock, flags);
2027 err = ablkcipher_enqueue_request(&dev->queue, req);
2028 if (dev->busy) {
2029 spin_unlock_irqrestore(&dev->lock, flags);
2030 goto exit;
2031 }
2032 dev->busy = true;
2033
2034 spin_unlock_irqrestore(&dev->lock, flags);
2035
2036 tasklet_schedule(&dev->tasklet);
2037
2038exit:
2039 return err;
2040}
2041
2042static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
2043{
2044 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
2045 struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
2046 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
2047 struct s5p_aes_dev *dev = ctx->dev;
2048
2049 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not a multiple of the AES block size\n");
2051 return -EINVAL;
2052 }
2053
2054 reqctx->mode = mode;
2055
2056 return s5p_aes_handle_req(dev, req);
2057}
2058
2059static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
2060 const uint8_t *key, unsigned int keylen)
2061{
2062 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
2063 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2064
2065 if (keylen != AES_KEYSIZE_128 &&
2066 keylen != AES_KEYSIZE_192 &&
2067 keylen != AES_KEYSIZE_256)
2068 return -EINVAL;
2069
2070 memcpy(ctx->aes_key, key, keylen);
2071 ctx->keylen = keylen;
2072
2073 return 0;
2074}
2075
2076static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
2077{
2078 return s5p_aes_crypt(req, 0);
2079}
2080
2081static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
2082{
2083 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
2084}
2085
2086static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
2087{
2088 return s5p_aes_crypt(req, FLAGS_AES_CBC);
2089}
2090
2091static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
2092{
2093 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
2094}
2095
2096static int s5p_aes_cra_init(struct crypto_tfm *tfm)
2097{
2098 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2099
2100 ctx->dev = s5p_dev;
2101 tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
2102
2103 return 0;
2104}
2105
2106static struct crypto_alg algs[] = {
2107 {
2108 .cra_name = "ecb(aes)",
2109 .cra_driver_name = "ecb-aes-s5p",
2110 .cra_priority = 100,
2111 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2112 CRYPTO_ALG_ASYNC |
2113 CRYPTO_ALG_KERN_DRIVER_ONLY,
2114 .cra_blocksize = AES_BLOCK_SIZE,
2115 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
2116 .cra_alignmask = 0x0f,
2117 .cra_type = &crypto_ablkcipher_type,
2118 .cra_module = THIS_MODULE,
2119 .cra_init = s5p_aes_cra_init,
2120 .cra_u.ablkcipher = {
2121 .min_keysize = AES_MIN_KEY_SIZE,
2122 .max_keysize = AES_MAX_KEY_SIZE,
2123 .setkey = s5p_aes_setkey,
2124 .encrypt = s5p_aes_ecb_encrypt,
2125 .decrypt = s5p_aes_ecb_decrypt,
2126 }
2127 },
2128 {
2129 .cra_name = "cbc(aes)",
2130 .cra_driver_name = "cbc-aes-s5p",
2131 .cra_priority = 100,
2132 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2133 CRYPTO_ALG_ASYNC |
2134 CRYPTO_ALG_KERN_DRIVER_ONLY,
2135 .cra_blocksize = AES_BLOCK_SIZE,
2136 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
2137 .cra_alignmask = 0x0f,
2138 .cra_type = &crypto_ablkcipher_type,
2139 .cra_module = THIS_MODULE,
2140 .cra_init = s5p_aes_cra_init,
2141 .cra_u.ablkcipher = {
2142 .min_keysize = AES_MIN_KEY_SIZE,
2143 .max_keysize = AES_MAX_KEY_SIZE,
2144 .ivsize = AES_BLOCK_SIZE,
2145 .setkey = s5p_aes_setkey,
2146 .encrypt = s5p_aes_cbc_encrypt,
2147 .decrypt = s5p_aes_cbc_decrypt,
2148 }
2149 },
2150};
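/*
 * Illustrative sketch (not part of this driver): these algorithms are
 * selected through the generic "ecb(aes)"/"cbc(aes)" names, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *
 * key, keylen, req, src_sg, dst_sg, nbytes and iv are placeholders and
 * request allocation/completion handling is omitted; whether this driver
 * or another provider is used depends on cra_priority.
 */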
2151
2152static int s5p_aes_probe(struct platform_device *pdev)
2153{
2154 struct device *dev = &pdev->dev;
2155 int i, j, err = -ENODEV;
2156 const struct samsung_aes_variant *variant;
2157 struct s5p_aes_dev *pdata;
2158 struct resource *res;
2159 unsigned int hash_i;
2160
2161 if (s5p_dev)
2162 return -EEXIST;
2163
2164 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2165 if (!pdata)
2166 return -ENOMEM;
2167
2168 variant = find_s5p_sss_version(pdev);
2169 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/*
	 * On Exynos the HASH registers live past the region advertised for
	 * the AES/DES part, so extend the resource by 0x300 before mapping
	 * when hash support is built in. If that larger mapping fails (for
	 * example because another driver already claims part of the range),
	 * fall back below to the original size without hash support.
	 */
2177 if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
2178 if (variant == &exynos_aes_data) {
2179 res->end += 0x300;
2180 pdata->use_hash = true;
2181 }
2182 }
2183
2184 pdata->res = res;
2185 pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
2186 if (IS_ERR(pdata->ioaddr)) {
2187 if (!pdata->use_hash)
2188 return PTR_ERR(pdata->ioaddr);
2189
2190 res->end -= 0x300;
2191 pdata->use_hash = false;
2192 pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
2193 if (IS_ERR(pdata->ioaddr))
2194 return PTR_ERR(pdata->ioaddr);
2195 }
2196
2197 pdata->clk = devm_clk_get(dev, "secss");
2198 if (IS_ERR(pdata->clk)) {
2199 dev_err(dev, "failed to find secss clock source\n");
2200 return -ENOENT;
2201 }
2202
2203 err = clk_prepare_enable(pdata->clk);
2204 if (err < 0) {
2205 dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
2206 return err;
2207 }
2208
2209 spin_lock_init(&pdata->lock);
2210 spin_lock_init(&pdata->hash_lock);
2211
2212 pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
2213 pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
2214
2215 pdata->irq_fc = platform_get_irq(pdev, 0);
2216 if (pdata->irq_fc < 0) {
2217 err = pdata->irq_fc;
2218 dev_warn(dev, "feed control interrupt is not available.\n");
2219 goto err_irq;
2220 }
2221 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
2222 s5p_aes_interrupt, IRQF_ONESHOT,
2223 pdev->name, pdev);
2224 if (err < 0) {
2225 dev_warn(dev, "feed control interrupt is not available.\n");
2226 goto err_irq;
2227 }
2228
2229 pdata->busy = false;
2230 pdata->dev = dev;
2231 platform_set_drvdata(pdev, pdata);
2232 s5p_dev = pdata;
2233
2234 tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
2235 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
2236
2237 for (i = 0; i < ARRAY_SIZE(algs); i++) {
2238 err = crypto_register_alg(&algs[i]);
2239 if (err)
2240 goto err_algs;
2241 }
2242
2243 if (pdata->use_hash) {
2244 tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
2245 (unsigned long)pdata);
2246 crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
2247
2248 for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
2249 hash_i++) {
2250 struct ahash_alg *alg;
2251
2252 alg = &algs_sha1_md5_sha256[hash_i];
2253 err = crypto_register_ahash(alg);
2254 if (err) {
2255 dev_err(dev, "can't register '%s': %d\n",
2256 alg->halg.base.cra_driver_name, err);
2257 goto err_hash;
2258 }
2259 }
2260 }
2261
2262 dev_info(dev, "s5p-sss driver registered\n");
2263
2264 return 0;
2265
2266err_hash:
2267 for (j = hash_i - 1; j >= 0; j--)
2268 crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
2269
2270 tasklet_kill(&pdata->hash_tasklet);
2271 res->end -= 0x300;
2272
2273err_algs:
2274 if (i < ARRAY_SIZE(algs))
2275 dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
2276 err);
2277
2278 for (j = 0; j < i; j++)
2279 crypto_unregister_alg(&algs[j]);
2280
2281 tasklet_kill(&pdata->tasklet);
2282
2283err_irq:
2284 clk_disable_unprepare(pdata->clk);
2285
2286 s5p_dev = NULL;
2287
2288 return err;
2289}
2290
2291static int s5p_aes_remove(struct platform_device *pdev)
2292{
2293 struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
2294 int i;
2295
2296 if (!pdata)
2297 return -ENODEV;
2298
2299 for (i = 0; i < ARRAY_SIZE(algs); i++)
2300 crypto_unregister_alg(&algs[i]);
2301
2302 tasklet_kill(&pdata->tasklet);
2303 if (pdata->use_hash) {
2304 for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
2305 crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
2306
2307 pdata->res->end -= 0x300;
2308 tasklet_kill(&pdata->hash_tasklet);
2309 pdata->use_hash = false;
2310 }
2311
2312 clk_disable_unprepare(pdata->clk);
2313 s5p_dev = NULL;
2314
2315 return 0;
2316}
2317
2318static struct platform_driver s5p_aes_crypto = {
2319 .probe = s5p_aes_probe,
2320 .remove = s5p_aes_remove,
2321 .driver = {
2322 .name = "s5p-secss",
2323 .of_match_table = s5p_sss_dt_match,
2324 },
2325};
2326
2327module_platform_driver(s5p_aes_crypto);
2328
2329MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
2330MODULE_LICENSE("GPL v2");
2331MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
2332MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");
2333