1
2
3
4
5
6
7
8
9
10
11
12#include <linux/clk.h>
13#include <linux/crypto.h>
14#include <linux/dma-mapping.h>
15#include <linux/err.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/platform_device.h>
24#include <linux/scatterlist.h>
25
26#include <crypto/ctr.h>
27#include <crypto/aes.h>
28#include <crypto/algapi.h>
29#include <crypto/scatterwalk.h>
30
31#include <crypto/hash.h>
32#include <crypto/md5.h>
33#include <crypto/sha.h>
34#include <crypto/internal/hash.h>
35
/* Shift Bit Field: place value @v at bit offset @s of a register word */
#define _SBF(s, v) ((v) << (s))
37
38
/*
 * Feed control (FC) register block.  Per usage in this driver: BRDMA feeds
 * the block cipher input, BTDMA drains the block cipher output, HRDMA feeds
 * the hash engine; the PKDMA channel is not used in this file.
 */

/* Raw interrupt status */
#define SSS_REG_FCINTSTAT 0x0000
#define SSS_FCINTSTAT_HPARTINT BIT(7)	/* partial hash round done */
#define SSS_FCINTSTAT_HDONEINT BIT(5)	/* full hash message done */
#define SSS_FCINTSTAT_BRDMAINT BIT(3)
#define SSS_FCINTSTAT_BTDMAINT BIT(2)
#define SSS_FCINTSTAT_HRDMAINT BIT(1)
#define SSS_FCINTSTAT_PKDMAINT BIT(0)

/* Interrupt enable: write 1 to enable a source */
#define SSS_REG_FCINTENSET 0x0004
#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)

/* Interrupt enable clear: write 1 to disable a source */
#define SSS_REG_FCINTENCLR 0x0008
#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)

/* Interrupt pending: write 1 to acknowledge (see s5p_aes_interrupt()) */
#define SSS_REG_FCINTPEND 0x000C
#define SSS_FCINTPEND_HPARTINTP BIT(7)
#define SSS_FCINTPEND_HDONEINTP BIT(5)
#define SSS_FCINTPEND_BRDMAINTP BIT(3)
#define SSS_FCINTPEND_BTDMAINTP BIT(2)
#define SSS_FCINTPEND_HRDMAINTP BIT(1)
#define SSS_FCINTPEND_PKDMAINTP BIT(0)

/* FIFO full/empty status bits */
#define SSS_REG_FCFIFOSTAT 0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)

/* FIFO control; low two bits select what feeds the hash engine */
#define SSS_REG_FCFIFOCTRL 0x0014
#define SSS_FCFIFOCTRL_DESSEL BIT(2)
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)	/* hash input via HRDMA */
#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
#define SSS_HASHIN_MASK _SBF(0, 0x03)

/* Cipher-input DMA: start address / length / control */
#define SSS_REG_FCBRDMAS 0x0020
#define SSS_REG_FCBRDMAL 0x0024
#define SSS_REG_FCBRDMAC 0x0028
#define SSS_FCBRDMAC_BYTESWAP BIT(1)
#define SSS_FCBRDMAC_FLUSH BIT(0)

/* Cipher-output DMA: start address / length / control */
#define SSS_REG_FCBTDMAS 0x0030
#define SSS_REG_FCBTDMAL 0x0034
#define SSS_REG_FCBTDMAC 0x0038
#define SSS_FCBTDMAC_BYTESWAP BIT(1)
#define SSS_FCBTDMAC_FLUSH BIT(0)

/* Hash-input DMA: start address / length / control */
#define SSS_REG_FCHRDMAS 0x0040
#define SSS_REG_FCHRDMAL 0x0044
#define SSS_REG_FCHRDMAC 0x0048
#define SSS_FCHRDMAC_BYTESWAP BIT(1)
#define SSS_FCHRDMAC_FLUSH BIT(0)

/* PKDMA channel (unused by this driver) */
#define SSS_REG_FCPKDMAS 0x0050
#define SSS_REG_FCPKDMAL 0x0054
#define SSS_REG_FCPKDMAC 0x0058
#define SSS_FCPKDMAC_BYTESWAP BIT(3)
#define SSS_FCPKDMAC_DESCEND BIT(2)
#define SSS_FCPKDMAC_TRANSMIT BIT(1)
#define SSS_FCPKDMAC_FLUSH BIT(0)

#define SSS_REG_FCPKDMAO 0x005C
115
116
/* AES engine registers, relative to dev->aes_ioaddr */
#define SSS_REG_AES_CONTROL 0x00
#define SSS_AES_BYTESWAP_DI BIT(11)	/* byte-swap data in */
#define SSS_AES_BYTESWAP_DO BIT(10)	/* byte-swap data out */
#define SSS_AES_BYTESWAP_IV BIT(9)
#define SSS_AES_BYTESWAP_CNT BIT(8)
#define SSS_AES_BYTESWAP_KEY BIT(7)
#define SSS_AES_KEY_CHANGE_MODE BIT(6)
#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
#define SSS_AES_FIFO_MODE BIT(3)
#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT BIT(0)	/* clear for encrypt */

#define SSS_REG_AES_STATUS 0x04
#define SSS_AES_BUSY BIT(2)
#define SSS_AES_INPUT_READY BIT(1)
#define SSS_AES_OUTPUT_READY BIT(0)
137
/*
 * AES data/IV/counter/key register banks; @s indexes 32-bit words.
 * The parameter is parenthesized (matching SSS_REG_HASH_IV/OUT below) so
 * that expression arguments such as ternaries expand correctly.
 */
#define SSS_REG_AES_IN_DATA(s)	(0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s)	(0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s)	(0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s)	(0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s)	(0x80 + ((s) << 2))
143
/* Accessors for the feed-control bank at dev->ioaddr */
#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))

/* Accessors for the AES bank at dev->aes_ioaddr */
#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
 SSS_AES_REG(dev, reg))
151
152
/* Per-request mode flags, stored in s5p_aes_reqctx.mode */
#define FLAGS_AES_DECRYPT BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
#define FLAGS_AES_CBC _SBF(1, 0x01)
#define FLAGS_AES_CTR _SBF(1, 0x02)

#define AES_KEY_LEN 16
#define CRYPTO_QUEUE_LEN 1
160
161
/* HASH engine registers, relative to dev->io_hash_base */
#define SSS_REG_HASH_CTRL 0x00

#define SSS_HASH_USER_IV_EN BIT(5)	/* load IV from SSS_REG_HASH_IV() */
#define SSS_HASH_INIT_BIT BIT(4)
#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)

#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)

#define SSS_REG_HASH_CTRL_PAUSE 0x04

#define SSS_HASH_PAUSE BIT(0)

#define SSS_REG_HASH_CTRL_FIFO 0x08

#define SSS_HASH_FIFO_MODE_DMA BIT(0)
#define SSS_HASH_FIFO_MODE_CPU 0

#define SSS_REG_HASH_CTRL_SWAP 0x0C

#define SSS_HASH_BYTESWAP_DI BIT(3)
#define SSS_HASH_BYTESWAP_DO BIT(2)
#define SSS_HASH_BYTESWAP_IV BIT(1)
#define SSS_HASH_BYTESWAP_KEY BIT(0)

/* Status register; bits acknowledged by writing them back */
#define SSS_REG_HASH_STATUS 0x10

#define SSS_HASH_STATUS_MSG_DONE BIT(6)
#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
#define SSS_HASH_STATUS_BUFFER_READY BIT(0)

/* Current-chunk message size (bits), 64-bit split low/high */
#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24

/* Size already hashed in previous chunks (bits) */
#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C

/* Per-word IV input and digest output banks; @s indexes 32-bit words */
#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))

#define HASH_BLOCK_SIZE 64
#define HASH_REG_SIZEOF 4	/* width of one digest register in bytes */
#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)

/*
 * Bit numbers within s5p_aes_dev.hash_flags, manipulated with the atomic
 * set_bit()/clear_bit()/test_bit() helpers.
 */
#define HASH_FLAGS_BUSY 0
#define HASH_FLAGS_FINAL 1
#define HASH_FLAGS_DMA_ACTIVE 2
#define HASH_FLAGS_OUTPUT_READY 3
#define HASH_FLAGS_DMA_READY 4
#define HASH_FLAGS_SGS_COPIED 5
#define HASH_FLAGS_SGS_ALLOCED 6

/* Staging buffer for partial blocks holds exactly one hash block */
#define BUFLEN HASH_BLOCK_SIZE

#define SSS_HASH_DMA_LEN_ALIGN 8
#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)

#define SSS_HASH_QUEUE_LENGTH 10
230
231
232
233
234
235
236
237
238
239
240
/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register bank offset from the base of the mapped region
 * @hash_offset: HASH register bank offset from the base of the mapped region
 * @clk_names: names of clocks needed to run the SSS IP (second entry may be
 *	       unset for variants with a single clock)
 */
struct samsung_aes_variant {
	unsigned int aes_offset;
	unsigned int hash_offset;
	const char *clk_names[2];
};
246
/* Per-request context: requested cipher mode (FLAGS_AES_* bits) */
struct s5p_aes_reqctx {
	unsigned long mode;
};
250
/* Per-transform context: device handle, key material and key length */
struct s5p_aes_ctx {
	struct s5p_aes_dev *dev;

	u8 aes_key[AES_MAX_KEY_SIZE];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];	/* RFC3686 CTR nonce prefix */
	int keylen;
};
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
/**
 * struct s5p_aes_dev - crypto device state container
 * @dev:	associated device
 * @clk:	operating clock for accessing the hardware
 * @pclk:	second (bus) clock, if the variant needs one
 * @ioaddr:	mapped IO memory region (feed-control bank base)
 * @aes_ioaddr:	AES register bank (ioaddr + variant aes_offset)
 * @irq_fc:	feed-control interrupt line
 * @req:	cipher request currently handled by the device
 * @ctx:	context of the currently handled cipher request
 * @sg_src:	DMA-mapped scatterlist entry currently feeding the cipher
 * @sg_dst:	DMA-mapped scatterlist entry currently receiving cipher output
 * @sg_src_cpy:	bounce copy of the source list for unaligned requests
 * @sg_dst_cpy:	bounce copy of the destination list for unaligned requests
 * @tasklet:	schedules handling of the next queued cipher request
 * @queue:	cipher request queue
 * @busy:	device currently owns @req/@ctx/@sg_* fields
 * @lock:	protects hardware register access and the fields above,
 *		including @busy
 * @res:	platform memory resource for the HASH part
 * @io_hash_base: HASH register bank (ioaddr + variant hash_offset)
 * @hash_lock:	protects @hash_req, @hash_queue and @hash_flags
 * @hash_flags:	HASH_FLAGS_* bit numbers for the current hash operation
 * @hash_queue:	async hash request queue
 * @hash_tasklet: schedules handling of the next queued hash request
 * @xmit_buf:	staging buffer DMA-transferred into the HASH block
 * @hash_req:	hash request currently being processed
 * @hash_sg_iter: scatterlist entry currently programmed into hash DMA
 * @hash_sg_cnt: remaining entries for @hash_sg_iter
 * @use_hash:	true if the HASH algorithms are registered/enabled
 */
struct s5p_aes_dev {
	struct device *dev;
	struct clk *clk;
	struct clk *pclk;
	void __iomem *ioaddr;
	void __iomem *aes_ioaddr;
	int irq_fc;

	struct ablkcipher_request *req;
	struct s5p_aes_ctx *ctx;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;

	struct scatterlist *sg_src_cpy;
	struct scatterlist *sg_dst_cpy;

	struct tasklet_struct tasklet;
	struct crypto_queue queue;
	bool busy;
	spinlock_t lock;

	struct resource *res;
	void __iomem *io_hash_base;

	spinlock_t hash_lock;
	unsigned long hash_flags;
	struct crypto_queue hash_queue;
	struct tasklet_struct hash_tasklet;

	u8 xmit_buf[BUFLEN];
	struct ahash_request *hash_req;
	struct scatterlist *hash_sg_iter;
	unsigned int hash_sg_cnt;

	bool use_hash;
};
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353struct s5p_hash_reqctx {
354 struct s5p_aes_dev *dd;
355 bool op_update;
356
357 u64 digcnt;
358 u8 digest[SHA256_DIGEST_SIZE];
359
360 unsigned int nregs;
361 u32 engine;
362
363 struct scatterlist *sg;
364 unsigned int sg_len;
365 struct scatterlist sgl[2];
366 unsigned int skip;
367 unsigned int total;
368 bool finup;
369 bool error;
370
371 u32 bufcnt;
372 u8 buffer[0];
373};
374
375
376
377
378
379
380
/**
 * struct s5p_hash_ctx - HASH transform context
 * @dd:		associated device
 * @flags:	transform flags
 * @fallback:	software shash transform used for short/errored requests
 */
struct s5p_hash_ctx {
	struct s5p_aes_dev *dd;
	unsigned long flags;
	struct crypto_shash *fallback;
};
386
/* Per-SoC register layout and clock requirements */
static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset	= 0x4000,
	.hash_offset	= 0x6000,
	.clk_names	= { "secss", },
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset	= 0x200,
	.hash_offset	= 0x400,
	.clk_names	= { "secss", },
};

/* Exynos5433 "slim" SSS needs both a bus and an operating clock */
static const struct samsung_aes_variant exynos5433_slim_aes_data = {
	.aes_offset	= 0x400,
	.hash_offset	= 0x800,
	.clk_names	= { "pclk", "aclk", },
};
404
/* Device-tree compatibles mapped to their variant data */
static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{
		.compatible = "samsung,exynos5433-slim-sss",
		.data = &exynos5433_slim_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
421
422static inline const struct samsung_aes_variant *find_s5p_sss_version
423 (const struct platform_device *pdev)
424{
425 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
426 const struct of_device_id *match;
427
428 match = of_match_node(s5p_sss_dt_match,
429 pdev->dev.of_node);
430 return (const struct samsung_aes_variant *)match->data;
431 }
432 return (const struct samsung_aes_variant *)
433 platform_get_device_id(pdev)->driver_data;
434}
435
/* Driver-global device instance; a single SSS block is assumed */
static struct s5p_aes_dev *s5p_dev;
437
/* Program the cipher-input DMA with one mapped scatterlist entry */
static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}
444
/* Program the cipher-output DMA with one mapped scatterlist entry */
static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}
451
452static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
453{
454 int len;
455
456 if (!*sg)
457 return;
458
459 len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
460 free_pages((unsigned long)sg_virt(*sg), get_order(len));
461
462 kfree(*sg);
463 *sg = NULL;
464}
465
/*
 * Copy @nbytes between a linear buffer and a scatterlist.
 * @out: 0 copies sg -> buf, 1 copies buf -> sg (scatterwalk convention).
 */
static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}
478
/*
 * s5p_sg_done() - finish the current cipher request's data handling
 *
 * Copies bounce-buffer output back into the caller's destination list (if a
 * bounce copy was used), releases both bounce copies, and for chained modes
 * reads the updated IV (CBC) or counter (CTR) back into req->info so the
 * next request can continue the chain.
 */
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	struct ablkcipher_request *req = dev->req;
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);

	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->nbytes);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->nbytes, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
	/* Read back the chaining value the hardware left in its registers. */
	if (reqctx->mode & FLAGS_AES_CBC)
		memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);

	else if (reqctx->mode & FLAGS_AES_CTR)
		memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}
499
500
/* Signal completion of @req to the crypto API. Must be called without locks. */
static void s5p_aes_complete(struct ablkcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}
505
/* Unmap the current cipher-output scatterlist entry */
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}
510
/* Unmap the current cipher-input scatterlist entry */
static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}
515
516static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
517 struct scatterlist **dst)
518{
519 void *pages;
520 int len;
521
522 *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
523 if (!*dst)
524 return -ENOMEM;
525
526 len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
527 pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
528 if (!pages) {
529 kfree(*dst);
530 *dst = NULL;
531 return -ENOMEM;
532 }
533
534 s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
535
536 sg_init_table(*dst, 1);
537 sg_set_buf(*dst, pages, len);
538
539 return 0;
540}
541
/*
 * DMA-map one destination scatterlist entry and remember it as the current
 * output segment.  Returns -EINVAL for an empty entry, -ENOMEM on map failure.
 */
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	dev->sg_dst = sg;

	return 0;
}
554
/*
 * DMA-map one source scatterlist entry and remember it as the current
 * input segment.  Returns -EINVAL for an empty entry, -ENOMEM on map failure.
 */
static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	dev->sg_src = sg;

	return 0;
}
567
568
569
570
571
572
573
574
575static int s5p_aes_tx(struct s5p_aes_dev *dev)
576{
577 int ret = 0;
578
579 s5p_unset_outdata(dev);
580
581 if (!sg_is_last(dev->sg_dst)) {
582 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
583 if (!ret)
584 ret = 1;
585 }
586
587 return ret;
588}
589
590
591
592
593
594
595
596
597static int s5p_aes_rx(struct s5p_aes_dev *dev)
598{
599 int ret = 0;
600
601 s5p_unset_indata(dev);
602
603 if (!sg_is_last(dev->sg_src)) {
604 ret = s5p_set_indata(dev, sg_next(dev->sg_src));
605 if (!ret)
606 ret = 1;
607 }
608
609 return ret;
610}
611
/* Read a HASH engine register at @offset from io_hash_base */
static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_hash_base + offset);
}
616
/* Write @value to a HASH engine register at @offset from io_hash_base */
static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{
	__raw_writel(value, dd->io_hash_base + offset);
}
622
623
624
625
626
627
/*
 * Program the hash-input DMA with one mapped scatterlist entry and consume
 * it from the remaining-segment count.
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{
	dev->hash_sg_cnt--;
	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg));
}
635
636
637
638
639
640
641
642
643
644
/*
 * s5p_hash_rx() - advance the hash DMA to the next scatterlist segment
 *
 * Return codes (consumed by s5p_aes_interrupt()):
 *   1 - another segment is ready; caller programs it via
 *       s5p_set_dma_hashdata()
 *   2 - all segments consumed but this transfer is not final; caller must
 *       pause the hash engine
 *   0 - all segments consumed and HASH_FLAGS_FINAL is set; nothing to do
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{
	if (dev->hash_sg_cnt > 0) {
		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
		return 1;
	}

	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
		return 0;

	return 2;
}
658
/*
 * s5p_aes_interrupt() - feed-control IRQ handler for both AES and HASH flows
 * @irq: interrupt number (unused)
 * @dev_id: platform device this handler was registered with
 *
 * Handles up to three DMA sources per invocation (cipher in, cipher out,
 * hash in) plus the hash done/partial status.  DMA reprogramming happens
 * under dev->lock; request completion and tasklet scheduling happen after
 * the lock is dropped.
 */
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	struct ablkcipher_request *req;
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	int err_dma_hx = 0;
	bool tx_end = false;
	bool hx_end = false;
	unsigned long flags;
	u32 status, st_bits;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Advance whichever DMA channels signalled completion; the s5p_aes_*
	 * helpers return 1 when another segment must be programmed below,
	 * negative errno on mapping failure.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		/* Check before s5p_aes_tx() advances sg_dst */
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	if (status & SSS_FCINTSTAT_HRDMAINT)
		err_dma_hx = s5p_hash_rx(dev);

	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
				SSS_FCINTSTAT_HRDMAINT);
	/* Acknowledge the handled DMA interrupts */
	SSS_WRITE(dev, FCINTPEND, st_bits);

	/* Hash engine finished a partial round or the whole message */
	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
		/* NOTE: plain assignment — MSG_DONE wins if both bits are set */
		if (status & SSS_FCINTSTAT_HPARTINT)
			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;

		if (status & SSS_FCINTSTAT_HDONEINT)
			st_bits = SSS_HASH_STATUS_MSG_DONE;

		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
		hx_end = true;
		/* Hash output ready; no further hash DMA this round */
		err_dma_hx = 0;
	}

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		/* Last cipher block drained: finish request, keep hash going */
		s5p_sg_done(dev);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
		/* Complete and schedule the next queued request unlocked */
		s5p_aes_complete(dev->req, 0);

		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Mid-transfer: program whichever channels reported another
		 * segment ready.
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	goto hash_irq_end;

error:
	/* Cipher DMA mapping failed: tear down and fail the request */
	s5p_sg_done(dev);
	dev->busy = false;
	req = dev->req;
	if (err_dma_hx == 1)
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);

hash_irq_end:
	/*
	 * Hash result ready -> let the hash tasklet collect it; otherwise, if
	 * all programmed segments are consumed but the transfer is not final
	 * (s5p_hash_rx() returned 2), pause the engine until more data comes.
	 */
	if (hx_end)
		tasklet_schedule(&dev->hash_tasklet);
	else if (err_dma_hx == 2)
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
			       SSS_HASH_PAUSE);

	return IRQ_HANDLED;
}
781
782
783
784
785
786static void s5p_hash_read_msg(struct ahash_request *req)
787{
788 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
789 struct s5p_aes_dev *dd = ctx->dd;
790 u32 *hash = (u32 *)ctx->digest;
791 unsigned int i;
792
793 for (i = 0; i < ctx->nregs; i++)
794 hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
795}
796
797
798
799
800
801
802static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
803 const struct s5p_hash_reqctx *ctx)
804{
805 const u32 *hash = (const u32 *)ctx->digest;
806 unsigned int i;
807
808 for (i = 0; i < ctx->nregs; i++)
809 s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
810}
811
812
813
814
815
/* Convenience wrapper: write @req's intermediate digest as the engine IV */
static void s5p_hash_write_iv(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}
822
823
824
825
826
827static void s5p_hash_copy_result(struct ahash_request *req)
828{
829 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
830
831 if (!req->result)
832 return;
833
834 memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
835}
836
837
838
839
840
/* Flush the hash-input DMA FIFO */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{
	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
}
845
846
847
848
849
850
851
/* Switch the hash FIFO to DMA mode (as opposed to CPU-fed mode) */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{
	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
}
856
857
858
859
860
861
/* Disable the hash-related interrupt sources given in @flags */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{
	SSS_WRITE(dev, FCINTENCLR, flags);
}
866
867
868
869
870
871
/* Enable the hash-related interrupt sources given in @flags */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{
	SSS_WRITE(dev, FCINTENSET, flags);
}
876
877
878
879
880
881
882static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
883{
884 unsigned long flags;
885 u32 flow;
886
887 spin_lock_irqsave(&dev->lock, flags);
888
889 flow = SSS_READ(dev, FCFIFOCTRL);
890 flow &= ~SSS_HASHIN_MASK;
891 flow |= hashflow;
892 SSS_WRITE(dev, FCFIFOCTRL, flow);
893
894 spin_unlock_irqrestore(&dev->lock, flags);
895}
896
897
898
899
900
901
902
903
904
/*
 * s5p_ahash_dma_init() - prepare the hash engine for a new DMA transfer
 *
 * Masks hash interrupts while flushing the FIFO, enables DMA mode, selects
 * the input flow, then re-enables the hash interrupt sources.
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{
	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
			     SSS_FCINTENCLR_HDONEINTENCLR |
			     SSS_FCINTENCLR_HPARTINTENCLR);
	s5p_hash_dma_flush(dev);

	s5p_hash_dma_enable(dev);
	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
			    SSS_FCINTENSET_HDONEINTENSET |
			    SSS_FCINTENSET_HPARTINTENSET);
}
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
/*
 * s5p_hash_write_ctrl() - program size/control registers and start hashing
 * @dd: device to program
 * @length: number of bytes in the chunk about to be transferred
 * @final: true if this chunk completes the message
 *
 * For continued messages the intermediate digest is loaded as a user IV.
 * MSG_SIZE carries the current chunk length when final; for intermediate
 * chunks the high word is set to BIT(31) (presumably "more data follows" —
 * TODO confirm against SSS documentation).  PRE_MSG_SIZE carries the total
 * number of BITS hashed so far (digcnt * 8).
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	u32 prelow, prehigh, low, high;
	u32 configflags, swapflags;
	u64 tmplen;

	configflags = ctx->engine | SSS_HASH_INIT_BIT;

	if (likely(ctx->digcnt)) {
		/* Continue from the previously read-back intermediate digest */
		s5p_hash_write_ctx_iv(dd, ctx);
		configflags |= SSS_HASH_USER_IV_EN;
	}

	if (final) {
		/* Length of this chunk in bytes; previous total in bits */
		low = length;
		high = 0;

		tmplen = ctx->digcnt * 8;
		prelow = (u32)tmplen;
		prehigh = (u32)(tmplen >> 32);
	} else {
		prelow = 0;
		prehigh = 0;
		low = 0;
		high = BIT(31);
	}

	/* Engine expects byte-swapped data, digest, IV and key */
	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;

	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);

	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
}
974
975
976
977
978
979
980
981
982
/*
 * s5p_hash_xmit_dma() - map ctx->sg and kick off the hash DMA transfer
 * @dd: device to use
 * @length: number of bytes to hash in this transfer
 * @final: true if this transfer completes the message
 *
 * Returns -EINPROGRESS on success (completion is signalled via interrupt),
 * -EINVAL when DMA mapping fails (ctx->error is set).
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	unsigned int cnt;

	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	if (!cnt) {
		dev_err(dd->dev, "dma_map_sg error\n");
		ctx->error = true;
		return -EINVAL;
	}

	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
	dd->hash_sg_iter = ctx->sg;
	dd->hash_sg_cnt = cnt;
	s5p_hash_write_ctrl(dd, length, final);
	/* Account the bytes now; the IRQ path only walks segments */
	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);

	/* Program the first segment; the IRQ handler chains the rest */
	s5p_set_dma_hashdata(dd, dd->hash_sg_iter);

	return -EINPROGRESS;
}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
/*
 * s5p_hash_copy_sgs() - flatten unaligned data into one contiguous buffer
 * @ctx: request context
 * @sg: source scatterlist
 * @new_len: number of bytes to take from @sg (after ctx->skip)
 *
 * Used when the scatterlist layout is not block-aligned.  Copies any bytes
 * buffered in xmit_buf followed by @new_len bytes from @sg into freshly
 * allocated pages, and points ctx->sg at a single-entry local list.  The
 * pages are freed in s5p_hash_finish_req() (HASH_FLAGS_SGS_COPIED).
 *
 * Returns 0 on success or -ENOMEM (ctx->error set).
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int pages, len;
	void *buf;

	len = new_len + ctx->bufcnt;
	pages = get_order(len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	/* Buffered bytes are now part of the flat copy */
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
/*
 * s5p_hash_copy_sg_lists() - build a private scatterlist over @sg's pages
 * @ctx: request context
 * @sg: source scatterlist
 * @new_len: number of bytes to cover (after skipping ctx->skip bytes)
 *
 * Unlike s5p_hash_copy_sgs() this does not copy payload data: it allocates
 * a new scatterlist array that references the original pages, optionally
 * prefixed by an entry for the bytes buffered in xmit_buf.  The array is
 * kfree()d in s5p_hash_finish_req() (HASH_FLAGS_SGS_ALLOCED).
 *
 * Returns 0 on success or -ENOMEM (ctx->error set).
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;
	unsigned int len;

	if (ctx->bufcnt)
		n++;	/* extra slot for the xmit_buf prefix */

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	/* Skip entries wholly consumed into the buffer */
	while (sg && skip >= sg->length) {
		skip -= sg->length;
		sg = sg_next(sg);
	}

	while (sg && new_len) {
		len = sg->length - skip;
		if (new_len < len)
			len = new_len;

		new_len -= len;
		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
		skip = 0;
		/* new_len is unsigned, so <= 0 means exactly 0 here */
		if (new_len <= 0)
			sg_mark_end(tmp);

		tmp = sg_next(tmp);
		ctx->sg_len++;
		sg = sg_next(sg);
	}

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
/*
 * s5p_hash_prepare_sgs() - choose how to present @sg to the hash DMA
 * @ctx: request context
 * @sg: request source scatterlist
 * @new_len: number of bytes to process from @sg (after ctx->skip)
 * @final: true if this is the final transfer of the message
 *
 * Walks the list to decide between three strategies:
 *  - every usable segment is BUFLEN-aligned and the list can be used as-is
 *    -> reference it directly (optionally chained after xmit_buf via sgl);
 *  - alignment fine but there is a skip offset, a non-final tail, or a
 *    partially used segment -> build a page-referencing copy of the list
 *    (s5p_hash_copy_sg_lists());
 *  - unaligned segment found -> flatten the data into one contiguous
 *    buffer (s5p_hash_copy_sgs()).
 *
 * Returns 0 on success or -ENOMEM from the copy helpers.
 */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg,
				unsigned int new_len, bool final)
{
	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
	bool aligned = true, list_ok = true;
	struct scatterlist *sg_tmp = sg;

	if (!sg || !sg->length || !new_len)
		return 0;

	if (skip || !final)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;
		if (skip >= sg_tmp->length) {
			skip -= sg_tmp->length;
			if (!sg_tmp->length) {
				/* zero-length entry: treat as unaligned */
				aligned = false;
				break;
			}
		} else {
			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
				aligned = false;
				break;
			}

			if (nbytes < sg_tmp->length - skip) {
				/* last entry only partially used */
				list_ok = false;
				break;
			}

			nbytes -= sg_tmp->length - skip;
			skip = 0;
		}

		sg_tmp = sg_next(sg_tmp);
	}

	if (!aligned)
		return s5p_hash_copy_sgs(ctx, sg, new_len);
	else if (!list_ok)
		return s5p_hash_copy_sg_lists(ctx, sg, new_len);

	/*
	 * Directly usable: if bytes are buffered in xmit_buf, chain them in
	 * front of the caller's list using the two-entry local sgl.
	 */
	if (ctx->bufcnt) {
		ctx->sg_len = n;
		sg_init_table(ctx->sgl, 2);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
		sg_chain(ctx->sgl, 2, sg);
		ctx->sg = ctx->sgl;
		ctx->sg_len++;
	} else {
		ctx->sg = sg;
		ctx->sg_len = n;
	}

	return 0;
}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
/*
 * s5p_hash_prepare_request() - split request data into DMA part and tail
 * @req: AHASH request
 * @update: true when the request carries new data (update/finup path)
 *
 * Tops up the partial-block buffer to a BUFLEN boundary, decides how many
 * bytes go to the hardware now (xmit_len, always a BUFLEN multiple unless
 * final), stashes the remainder back into ctx->buffer for the next call,
 * and prepares ctx->sg accordingly (via s5p_hash_prepare_sgs() or the
 * local xmit_buf staging copy for short transfers).
 *
 * Returns 0 on success or a negative errno from the sg preparation.
 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	bool final = ctx->finup;
	int xmit_len, hash_later, nbytes;
	int ret;

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	ctx->total = nbytes + ctx->bufcnt;
	if (!ctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
		/* Fill the buffer up to a whole block with leading req bytes */
		int len = BUFLEN - ctx->bufcnt % BUFLEN;

		if (len > nbytes)
			len = nbytes;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, len, 0);
		ctx->bufcnt += len;
		nbytes -= len;
		ctx->skip = len;	/* these bytes are consumed from src */
	} else {
		ctx->skip = 0;
	}

	if (ctx->bufcnt)
		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);

	xmit_len = ctx->total;
	if (final) {
		hash_later = 0;
	} else {
		/* Keep at least one full block back for a later final() */
		if (IS_ALIGNED(xmit_len, BUFLEN))
			xmit_len -= BUFLEN;
		else
			xmit_len -= xmit_len & (BUFLEN - 1);

		hash_later = ctx->total - xmit_len;
		/* Save the tail of req->src for the next round */
		scatterwalk_map_and_copy(ctx->buffer, req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);
	}

	if (xmit_len > BUFLEN) {
		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
					   final);
		if (ret)
			return ret;
	} else {
		/* Short transfer: stage everything in xmit_buf */
		if (unlikely(!ctx->bufcnt)) {
			/* bufcnt == 0 means xmit_buf was not filled above */
			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
						 0, xmit_len, 0);
		}

		sg_init_table(ctx->sgl, 1);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);

		ctx->sg = ctx->sgl;
		ctx->sg_len = 1;
	}

	ctx->bufcnt = hash_later;
	if (!final)
		ctx->total = xmit_len;

	return 0;
}
1292
1293
1294
1295
1296
1297
1298
/* Unmap the finished hash DMA scatterlist and clear the active flag */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
}
1306
1307
1308
1309
1310
1311static void s5p_hash_finish(struct ahash_request *req)
1312{
1313 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1314 struct s5p_aes_dev *dd = ctx->dd;
1315
1316 if (ctx->digcnt)
1317 s5p_hash_copy_result(req);
1318
1319 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
1320}
1321
1322
1323
1324
1325
1326
/*
 * s5p_hash_finish_req() - finalize @req and release per-request resources
 * @req: AHASH request being completed
 * @err: 0 on success, negative errno otherwise
 *
 * Frees whatever sg representation s5p_hash_prepare_sgs() built (flattened
 * pages or an allocated sg array), reads back the digest on success, resets
 * the operation flags under hash_lock, and invokes the request completion.
 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	unsigned long flags;

	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
		kfree(ctx->sg);

	ctx->sg = NULL;
	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
			    BIT(HASH_FLAGS_SGS_COPIED));

	if (!err && !ctx->error) {
		/* Latch intermediate digest; copy out only if final */
		s5p_hash_read_msg(req);
		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
			s5p_hash_finish(req);
	} else {
		ctx->error = true;
	}

	spin_lock_irqsave(&dd->hash_lock, flags);
	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
			    BIT(HASH_FLAGS_DMA_READY) |
			    BIT(HASH_FLAGS_OUTPUT_READY));
	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
/*
 * s5p_hash_handle_queue() - enqueue @req and dispatch the next queued one
 * @dd: device handling the queue
 * @req: new request to enqueue, or NULL to just drain the queue
 *
 * If the engine is idle, dequeues the next request, prepares its data and
 * starts the DMA transfer.  When a request completes synchronously (error
 * or nothing to do), it is finished immediately and the loop retries with
 * the next queued request.
 *
 * Returns the crypto-queue status for @req (-EINPROGRESS/-EBUSY/...), as
 * returned by ahash_enqueue_request().
 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct s5p_hash_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->hash_lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->hash_queue, req);

	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		/* Engine busy; the tasklet will pick the request up later */
		spin_unlock_irqrestore(&dd->hash_lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->hash_queue);
	async_req = crypto_dequeue_request(&dd->hash_queue);
	if (async_req)
		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);

	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->hash_req = req;
	ctx = ahash_request_ctx(req);

	err = s5p_hash_prepare_request(req, ctx->op_update);
	if (err || !ctx->total)
		goto out;

	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
		ctx->op_update, req->nbytes);

	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
	if (ctx->digcnt)
		s5p_hash_write_iv(req);	/* restore intermediate hash state */

	if (ctx->op_update) {
		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
			/* finup failed to start; retry as a final transfer */
			err = s5p_hash_xmit_dma(dd, ctx->total, true);
	} else {
		err = s5p_hash_xmit_dma(dd, ctx->total, true);
	}
out:
	if (err != -EINPROGRESS) {
		/* Completed synchronously: finish it and serve the next one */
		s5p_hash_finish_req(req, err);
		req = NULL;

		/*
		 * The retry re-runs the dequeue path; ret for the original
		 * caller's request is already fixed.
		 */
		goto retry;
	}

	return ret;
}
1441
1442
1443
1444
1445
/*
 * s5p_hash_tasklet_cb() - bottom half for hash completions
 * @data: device pointer, cast to unsigned long for the tasklet API
 *
 * Either drains the queue when idle, or — after the IRQ handler flagged
 * DMA/output readiness — unmaps the transfer, finishes the request and
 * kicks the queue again.
 */
static void s5p_hash_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;

	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		s5p_hash_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
				       &dd->hash_flags)) {
			s5p_hash_update_dma_stop(dd);
		}

		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
				       &dd->hash_flags)) {
			/* Digest registers are valid; collect the result */
			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
			goto finish;
		}
	}

	/* Nothing ready yet; wait for the next interrupt */
	return;

finish:
	s5p_hash_finish_req(dd->hash_req, 0);

	/* If no other operation grabbed the engine, serve the next request */
	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
		s5p_hash_handle_queue(dd, NULL);
}
1479
1480
1481
1482
1483
1484
1485
1486
/*
 * s5p_hash_enqueue() - record the operation kind and queue the request
 * @req: AHASH request
 * @op: true for update, false for final
 */
static int s5p_hash_enqueue(struct ahash_request *req, bool op)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op_update = op;

	return s5p_hash_handle_queue(tctx->dd, req);
}
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506static int s5p_hash_update(struct ahash_request *req)
1507{
1508 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1509
1510 if (!req->nbytes)
1511 return 0;
1512
1513 if (ctx->bufcnt + req->nbytes <= BUFLEN) {
1514 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1515 0, req->nbytes, 0);
1516 ctx->bufcnt += req->nbytes;
1517 return 0;
1518 }
1519
1520 return s5p_hash_enqueue(req, true);
1521}
1522
1523
1524
1525
1526
1527
1528
1529
1530
/*
 * s5p_hash_shash_digest() - compute a digest with the software fallback.
 * @tfm:   fallback shash transform
 * @flags: request flags (currently unused by this helper)
 * @data:  input bytes
 * @len:   number of input bytes
 * @out:   digest output buffer
 *
 * Builds an on-stack shash descriptor and runs a one-shot digest.
 */
static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
				 const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	return crypto_shash_digest(shash, data, len, out);
}
1540
1541
1542
1543
1544
1545static int s5p_hash_final_shash(struct ahash_request *req)
1546{
1547 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1548 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1549
1550 return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
1551 ctx->buffer, ctx->bufcnt, req->result);
1552}
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577static int s5p_hash_final(struct ahash_request *req)
1578{
1579 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1580
1581 ctx->finup = true;
1582 if (ctx->error)
1583 return -EINVAL;
1584
1585 if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
1586 return s5p_hash_final_shash(req);
1587
1588 return s5p_hash_enqueue(req, false);
1589}
1590
1591
1592
1593
1594
1595
1596
1597static int s5p_hash_finup(struct ahash_request *req)
1598{
1599 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1600 int err1, err2;
1601
1602 ctx->finup = true;
1603
1604 err1 = s5p_hash_update(req);
1605 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1606 return err1;
1607
1608
1609
1610
1611
1612
1613 err2 = s5p_hash_final(req);
1614
1615 return err1 ?: err2;
1616}
1617
1618
1619
1620
1621
1622
1623
1624static int s5p_hash_init(struct ahash_request *req)
1625{
1626 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1627 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1628 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1629
1630 ctx->dd = tctx->dd;
1631 ctx->error = false;
1632 ctx->finup = false;
1633 ctx->bufcnt = 0;
1634 ctx->digcnt = 0;
1635 ctx->total = 0;
1636 ctx->skip = 0;
1637
1638 dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
1639 crypto_ahash_digestsize(tfm));
1640
1641 switch (crypto_ahash_digestsize(tfm)) {
1642 case MD5_DIGEST_SIZE:
1643 ctx->engine = SSS_HASH_ENGINE_MD5;
1644 ctx->nregs = HASH_MD5_MAX_REG;
1645 break;
1646 case SHA1_DIGEST_SIZE:
1647 ctx->engine = SSS_HASH_ENGINE_SHA1;
1648 ctx->nregs = HASH_SHA1_MAX_REG;
1649 break;
1650 case SHA256_DIGEST_SIZE:
1651 ctx->engine = SSS_HASH_ENGINE_SHA256;
1652 ctx->nregs = HASH_SHA256_MAX_REG;
1653 break;
1654 default:
1655 ctx->error = true;
1656 return -EINVAL;
1657 }
1658
1659 return 0;
1660}
1661
1662
1663
1664
1665
1666
1667
/*
 * s5p_hash_digest() - AHASH .digest entry point: init then finup.
 * @req: AHASH request
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	int err = s5p_hash_init(req);

	return err ? err : s5p_hash_finup(req);
}
1672
1673
1674
1675
1676
1677static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
1678{
1679 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1680 const char *alg_name = crypto_tfm_alg_name(tfm);
1681
1682 tctx->dd = s5p_dev;
1683
1684 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1685 CRYPTO_ALG_NEED_FALLBACK);
1686 if (IS_ERR(tctx->fallback)) {
1687 pr_err("fallback alloc fails for '%s'\n", alg_name);
1688 return PTR_ERR(tctx->fallback);
1689 }
1690
1691 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1692 sizeof(struct s5p_hash_reqctx) + BUFLEN);
1693
1694 return 0;
1695}
1696
1697
1698
1699
1700
/* Thin .cra_init wrapper shared by all three hash algorithms. */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}
1705
1706
1707
1708
1709
1710
1711
/*
 * s5p_hash_cra_exit() - transform exit: release the software fallback.
 * @tfm: crypto transform being torn down
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL; /* guard against use after exit */
}
1719
1720
1721
1722
1723
1724
/*
 * s5p_hash_export() - serialize the request context for later import.
 * @req: AHASH request
 * @out: destination buffer (at least statesize bytes)
 *
 * Copies the context plus only the currently buffered bytes.
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}
1733
1734
1735
1736
1737
1738
1739static int s5p_hash_import(struct ahash_request *req, const void *in)
1740{
1741 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1742 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1743 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1744 const struct s5p_hash_reqctx *ctx_in = in;
1745
1746 memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
1747 if (ctx_in->bufcnt > BUFLEN) {
1748 ctx->error = true;
1749 return -EINVAL;
1750 }
1751
1752 ctx->dd = tctx->dd;
1753 ctx->error = false;
1754
1755 return 0;
1756}
1757
/*
 * AHASH algorithm descriptors registered when the Exynos hash engine is
 * available: sha1, md5 and sha256. All three share the same entry points
 * and differ only in digest size and algorithm name. Low cra_priority
 * (100) lets stronger implementations win; NEED_FALLBACK pairs with the
 * software shash allocated in s5p_hash_cra_init_alg().
 */
static struct ahash_alg algs_sha1_md5_sha256[] = {
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "exynos-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
},
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "exynos-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
},
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "exynos-sha256",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
}

};
1836
/*
 * s5p_set_aes() - load IV, counter and key into the AES register bank.
 * @dev:    device context with the mapped AES register base (aes_ioaddr)
 * @key:    AES key, @keylen bytes
 * @iv:     16-byte IV for CBC mode, or NULL when the mode needs no IV
 * @ctr:    16-byte initial counter for CTR mode, or NULL otherwise
 * @keylen: key length in bytes (AES_KEYSIZE_128/192/256)
 *
 * Shorter keys start at a higher key-data register (192-bit at word 2,
 * 128-bit at word 4) so the key always ends at the last key register —
 * presumably the layout the SSS engine expects; confirm against the
 * hardware datasheet.
 */
static void s5p_set_aes(struct s5p_aes_dev *dev,
			const u8 *key, const u8 *iv, const u8 *ctr,
			unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
			    AES_BLOCK_SIZE);

	if (ctr)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
			    AES_BLOCK_SIZE);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}
1860
1861static bool s5p_is_sg_aligned(struct scatterlist *sg)
1862{
1863 while (sg) {
1864 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
1865 return false;
1866 sg = sg_next(sg);
1867 }
1868
1869 return true;
1870}
1871
1872static int s5p_set_indata_start(struct s5p_aes_dev *dev,
1873 struct ablkcipher_request *req)
1874{
1875 struct scatterlist *sg;
1876 int err;
1877
1878 dev->sg_src_cpy = NULL;
1879 sg = req->src;
1880 if (!s5p_is_sg_aligned(sg)) {
1881 dev_dbg(dev->dev,
1882 "At least one unaligned source scatter list, making a copy\n");
1883 err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
1884 if (err)
1885 return err;
1886
1887 sg = dev->sg_src_cpy;
1888 }
1889
1890 err = s5p_set_indata(dev, sg);
1891 if (err) {
1892 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
1893 return err;
1894 }
1895
1896 return 0;
1897}
1898
1899static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1900 struct ablkcipher_request *req)
1901{
1902 struct scatterlist *sg;
1903 int err;
1904
1905 dev->sg_dst_cpy = NULL;
1906 sg = req->dst;
1907 if (!s5p_is_sg_aligned(sg)) {
1908 dev_dbg(dev->dev,
1909 "At least one unaligned dest scatter list, making a copy\n");
1910 err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
1911 if (err)
1912 return err;
1913
1914 sg = dev->sg_dst_cpy;
1915 }
1916
1917 err = s5p_set_outdata(dev, sg);
1918 if (err) {
1919 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
1920 return err;
1921 }
1922
1923 return 0;
1924}
1925
/*
 * s5p_aes_crypt_start() - program the AES engine and kick off DMA.
 * @dev:  device context (dev->req/dev->ctx already set by the tasklet)
 * @mode: FLAGS_AES_* operation flags (direction and chaining mode)
 *
 * Builds the AES control word, then under dev->lock: masks the feed-control
 * interrupts, maps source/destination scatterlists, loads key/IV/counter,
 * starts both DMA directions and re-enables the completion interrupts.
 * On mapping failure the already-claimed resources are unwound via the
 * goto chain and the request is completed with the error.
 */
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	u32 aes_control;
	unsigned long flags;
	int err;
	u8 *iv, *ctr;

	/* This sets bit [13:12] to 00, which selects 128-bit counter */
	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	/* select chaining mode and which of req->info is IV vs counter */
	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->info;
		ctr = NULL;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = NULL;
		ctr = req->info;
	} else {
		iv = NULL; /* AES_ECB */
		ctr = NULL;
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	/* mask DMA-done interrupts while (re)programming the engine */
	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);

	s5p_set_dma_indata(dev,  dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	/* unmask completion interrupts: transfer is now in flight */
	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	/* complete outside the lock; may re-enter the crypto API */
	s5p_aes_complete(req, err);
}
2002
2003static void s5p_tasklet_cb(unsigned long data)
2004{
2005 struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
2006 struct crypto_async_request *async_req, *backlog;
2007 struct s5p_aes_reqctx *reqctx;
2008 unsigned long flags;
2009
2010 spin_lock_irqsave(&dev->lock, flags);
2011 backlog = crypto_get_backlog(&dev->queue);
2012 async_req = crypto_dequeue_request(&dev->queue);
2013
2014 if (!async_req) {
2015 dev->busy = false;
2016 spin_unlock_irqrestore(&dev->lock, flags);
2017 return;
2018 }
2019 spin_unlock_irqrestore(&dev->lock, flags);
2020
2021 if (backlog)
2022 backlog->complete(backlog, -EINPROGRESS);
2023
2024 dev->req = ablkcipher_request_cast(async_req);
2025 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
2026 reqctx = ablkcipher_request_ctx(dev->req);
2027
2028 s5p_aes_crypt_start(dev, reqctx->mode);
2029}
2030
2031static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
2032 struct ablkcipher_request *req)
2033{
2034 unsigned long flags;
2035 int err;
2036
2037 spin_lock_irqsave(&dev->lock, flags);
2038 err = ablkcipher_enqueue_request(&dev->queue, req);
2039 if (dev->busy) {
2040 spin_unlock_irqrestore(&dev->lock, flags);
2041 return err;
2042 }
2043 dev->busy = true;
2044
2045 spin_unlock_irqrestore(&dev->lock, flags);
2046
2047 tasklet_schedule(&dev->tasklet);
2048
2049 return err;
2050}
2051
2052static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
2053{
2054 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
2055 struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
2056 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
2057 struct s5p_aes_dev *dev = ctx->dev;
2058
2059 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
2060 ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
2061 dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
2062 return -EINVAL;
2063 }
2064
2065 reqctx->mode = mode;
2066
2067 return s5p_aes_handle_req(dev, req);
2068}
2069
2070static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
2071 const u8 *key, unsigned int keylen)
2072{
2073 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
2074 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2075
2076 if (keylen != AES_KEYSIZE_128 &&
2077 keylen != AES_KEYSIZE_192 &&
2078 keylen != AES_KEYSIZE_256)
2079 return -EINVAL;
2080
2081 memcpy(ctx->aes_key, key, keylen);
2082 ctx->keylen = keylen;
2083
2084 return 0;
2085}
2086
/* ECB encrypt: no chaining flags, encrypt direction (default). */
static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}
2091
/* ECB decrypt. */
static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}
2096
/* CBC encrypt: req->info carries the IV. */
static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}
2101
/* CBC decrypt: req->info carries the IV. */
static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
2106
/* CTR mode: encrypt and decrypt are the same operation (keystream XOR). */
static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CTR);
}
2111
/*
 * s5p_aes_cra_init() - AES transform init: bind the singleton device and
 * size the per-request context.
 * @tfm: crypto transform being initialized
 */
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}
2121
/*
 * AES block-cipher descriptors: ecb(aes), cbc(aes) and ctr(aes). All use
 * the same setkey and per-mode wrappers above. Low cra_priority (100) so
 * better implementations take precedence; 0x0f alignmask matches the
 * engine's 16-byte DMA alignment requirement.
 */
static struct crypto_alg algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ecb_encrypt,
			.decrypt	= s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_cbc_encrypt,
			.decrypt	= s5p_aes_cbc_decrypt,
		}
	},
	{
		.cra_name		= "ctr(aes)",
		.cra_driver_name	= "ctr-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ctr_crypt,
			.decrypt	= s5p_aes_ctr_crypt,
		}
	},
};
2189
2190static int s5p_aes_probe(struct platform_device *pdev)
2191{
2192 struct device *dev = &pdev->dev;
2193 int i, j, err = -ENODEV;
2194 const struct samsung_aes_variant *variant;
2195 struct s5p_aes_dev *pdata;
2196 struct resource *res;
2197 unsigned int hash_i;
2198
2199 if (s5p_dev)
2200 return -EEXIST;
2201
2202 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2203 if (!pdata)
2204 return -ENOMEM;
2205
2206 variant = find_s5p_sss_version(pdev);
2207 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2208
2209
2210
2211
2212
2213
2214
2215 if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
2216 if (variant == &exynos_aes_data) {
2217 res->end += 0x300;
2218 pdata->use_hash = true;
2219 }
2220 }
2221
2222 pdata->res = res;
2223 pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
2224 if (IS_ERR(pdata->ioaddr)) {
2225 if (!pdata->use_hash)
2226 return PTR_ERR(pdata->ioaddr);
2227
2228 res->end -= 0x300;
2229 pdata->use_hash = false;
2230 pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
2231 if (IS_ERR(pdata->ioaddr))
2232 return PTR_ERR(pdata->ioaddr);
2233 }
2234
2235 pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
2236 if (IS_ERR(pdata->clk)) {
2237 dev_err(dev, "failed to find secss clock %s\n",
2238 variant->clk_names[0]);
2239 return -ENOENT;
2240 }
2241
2242 err = clk_prepare_enable(pdata->clk);
2243 if (err < 0) {
2244 dev_err(dev, "Enabling clock %s failed, err %d\n",
2245 variant->clk_names[0], err);
2246 return err;
2247 }
2248
2249 if (variant->clk_names[1]) {
2250 pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
2251 if (IS_ERR(pdata->pclk)) {
2252 dev_err(dev, "failed to find clock %s\n",
2253 variant->clk_names[1]);
2254 err = -ENOENT;
2255 goto err_clk;
2256 }
2257
2258 err = clk_prepare_enable(pdata->pclk);
2259 if (err < 0) {
2260 dev_err(dev, "Enabling clock %s failed, err %d\n",
2261 variant->clk_names[0], err);
2262 goto err_clk;
2263 }
2264 } else {
2265 pdata->pclk = NULL;
2266 }
2267
2268 spin_lock_init(&pdata->lock);
2269 spin_lock_init(&pdata->hash_lock);
2270
2271 pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
2272 pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
2273
2274 pdata->irq_fc = platform_get_irq(pdev, 0);
2275 if (pdata->irq_fc < 0) {
2276 err = pdata->irq_fc;
2277 dev_warn(dev, "feed control interrupt is not available.\n");
2278 goto err_irq;
2279 }
2280 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
2281 s5p_aes_interrupt, IRQF_ONESHOT,
2282 pdev->name, pdev);
2283 if (err < 0) {
2284 dev_warn(dev, "feed control interrupt is not available.\n");
2285 goto err_irq;
2286 }
2287
2288 pdata->busy = false;
2289 pdata->dev = dev;
2290 platform_set_drvdata(pdev, pdata);
2291 s5p_dev = pdata;
2292
2293 tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
2294 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
2295
2296 for (i = 0; i < ARRAY_SIZE(algs); i++) {
2297 err = crypto_register_alg(&algs[i]);
2298 if (err)
2299 goto err_algs;
2300 }
2301
2302 if (pdata->use_hash) {
2303 tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
2304 (unsigned long)pdata);
2305 crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
2306
2307 for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
2308 hash_i++) {
2309 struct ahash_alg *alg;
2310
2311 alg = &algs_sha1_md5_sha256[hash_i];
2312 err = crypto_register_ahash(alg);
2313 if (err) {
2314 dev_err(dev, "can't register '%s': %d\n",
2315 alg->halg.base.cra_driver_name, err);
2316 goto err_hash;
2317 }
2318 }
2319 }
2320
2321 dev_info(dev, "s5p-sss driver registered\n");
2322
2323 return 0;
2324
2325err_hash:
2326 for (j = hash_i - 1; j >= 0; j--)
2327 crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
2328
2329 tasklet_kill(&pdata->hash_tasklet);
2330 res->end -= 0x300;
2331
2332err_algs:
2333 if (i < ARRAY_SIZE(algs))
2334 dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
2335 err);
2336
2337 for (j = 0; j < i; j++)
2338 crypto_unregister_alg(&algs[j]);
2339
2340 tasklet_kill(&pdata->tasklet);
2341
2342err_irq:
2343 if (pdata->pclk)
2344 clk_disable_unprepare(pdata->pclk);
2345
2346err_clk:
2347 clk_disable_unprepare(pdata->clk);
2348 s5p_dev = NULL;
2349
2350 return err;
2351}
2352
/*
 * s5p_aes_remove() - unbind the device: unregister algorithms, kill the
 * tasklets, restore the resource extension and gate the clocks.
 * @pdev: platform device being removed
 *
 * Teardown mirrors probe in reverse order.
 */
static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		/* undo the 0x300 resource extension made in probe */
		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	if (pdata->pclk)
		clk_disable_unprepare(pdata->pclk);

	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL; /* allow a future probe to succeed */

	return 0;
}
2382
/* Platform driver glue; devices are matched via DT (s5p_sss_dt_match). */
static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};
2391
2392module_platform_driver(s5p_aes_crypto);
2393
2394MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
2395MODULE_LICENSE("GPL v2");
2396MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
2397MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");
2398