1
2
3
4
5
6
7
8
9
10
11
12
13
14#define pr_fmt(fmt) "hashX hashX: " fmt
15
16#include <linux/clk.h>
17#include <linux/device.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/klist.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/mod_devicetable.h>
25#include <linux/platform_device.h>
26#include <linux/crypto.h>
27
28#include <linux/regulator/consumer.h>
29#include <linux/dmaengine.h>
30#include <linux/bitops.h>
31
32#include <crypto/internal/hash.h>
33#include <crypto/sha.h>
34#include <crypto/scatterwalk.h>
35#include <crypto/algapi.h>
36
37#include <linux/platform_data/crypto-ux500.h>
38
39#include "hash_alg.h"
40
/* Transfer mode selected at module load: CPU = 0 (default), DMA = 1. */
static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
44
45
/*
 * Precomputed HMAC-SHA1 digest returned when both message and key are
 * empty — see get_empty_message_digest().
 */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

/*
 * Precomputed HMAC-SHA256 digest returned when both message and key are
 * empty — see get_empty_message_digest().
 */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
59
60
61
62
63
64
65
/**
 * struct hash_driver_data - Driver-global bookkeeping.
 * @device_list:	klist of registered hash devices.
 * @device_allocation:	Semaphore counting devices free for allocation;
 *			taken in hash_get_device_data() and released in
 *			release_hash_device().
 */
struct hash_driver_data {
	struct klist device_list;
	struct semaphore device_allocation;
};

static struct hash_driver_data driver_data;

/* Forward declaration: used by hash_hw_final() below. */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);
86
87
88
89
90
91
92static void release_hash_device(struct hash_device_data *device_data)
93{
94 spin_lock(&device_data->ctx_lock);
95 device_data->current_ctx->device = NULL;
96 device_data->current_ctx = NULL;
97 spin_unlock(&device_data->ctx_lock);
98
99
100
101
102
103 up(&driver_data.device_allocation);
104}
105
106static void hash_dma_setup_channel(struct hash_device_data *device_data,
107 struct device *dev)
108{
109 struct hash_platform_data *platform_data = dev->platform_data;
110 struct dma_slave_config conf = {
111 .direction = DMA_MEM_TO_DEV,
112 .dst_addr = device_data->phybase + HASH_DMA_FIFO,
113 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
114 .dst_maxburst = 16,
115 };
116
117 dma_cap_zero(device_data->dma.mask);
118 dma_cap_set(DMA_SLAVE, device_data->dma.mask);
119
120 device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
121 device_data->dma.chan_mem2hash =
122 dma_request_channel(device_data->dma.mask,
123 platform_data->dma_filter,
124 device_data->dma.cfg_mem2hash);
125
126 dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
127
128 init_completion(&device_data->dma.complete);
129}
130
131static void hash_dma_callback(void *data)
132{
133 struct hash_ctx *ctx = data;
134
135 complete(&ctx->device->dma.complete);
136}
137
138static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
139 int len, enum dma_data_direction direction)
140{
141 struct dma_async_tx_descriptor *desc = NULL;
142 struct dma_chan *channel = NULL;
143 dma_cookie_t cookie;
144
145 if (direction != DMA_TO_DEVICE) {
146 dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
147 __func__);
148 return -EFAULT;
149 }
150
151 sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
152
153 channel = ctx->device->dma.chan_mem2hash;
154 ctx->device->dma.sg = sg;
155 ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
156 ctx->device->dma.sg, ctx->device->dma.nents,
157 direction);
158
159 if (!ctx->device->dma.sg_len) {
160 dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
161 __func__);
162 return -EFAULT;
163 }
164
165 dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
166 __func__);
167 desc = dmaengine_prep_slave_sg(channel,
168 ctx->device->dma.sg, ctx->device->dma.sg_len,
169 direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
170 if (!desc) {
171 dev_err(ctx->device->dev,
172 "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
173 return -EFAULT;
174 }
175
176 desc->callback = hash_dma_callback;
177 desc->callback_param = ctx;
178
179 cookie = dmaengine_submit(desc);
180 dma_async_issue_pending(channel);
181
182 return 0;
183}
184
185static void hash_dma_done(struct hash_ctx *ctx)
186{
187 struct dma_chan *chan;
188
189 chan = ctx->device->dma.chan_mem2hash;
190 dmaengine_terminate_all(chan);
191 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
192 ctx->device->dma.sg_len, DMA_TO_DEVICE);
193}
194
195static int hash_dma_write(struct hash_ctx *ctx,
196 struct scatterlist *sg, int len)
197{
198 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
199 if (error) {
200 dev_dbg(ctx->device->dev,
201 "%s: hash_set_dma_transfer() failed\n", __func__);
202 return error;
203 }
204
205 return len;
206}
207
208
209
210
211
212
213
214
215
216static int get_empty_message_digest(
217 struct hash_device_data *device_data,
218 u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
219{
220 int ret = 0;
221 struct hash_ctx *ctx = device_data->current_ctx;
222 *zero_digest = false;
223
224
225
226
227
228 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
229 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
230 memcpy(zero_hash, &sha1_zero_message_hash[0],
231 SHA1_DIGEST_SIZE);
232 *zero_hash_size = SHA1_DIGEST_SIZE;
233 *zero_digest = true;
234 } else if (HASH_ALGO_SHA256 ==
235 ctx->config.algorithm) {
236 memcpy(zero_hash, &sha256_zero_message_hash[0],
237 SHA256_DIGEST_SIZE);
238 *zero_hash_size = SHA256_DIGEST_SIZE;
239 *zero_digest = true;
240 } else {
241 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
242 __func__);
243 ret = -EINVAL;
244 goto out;
245 }
246 } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
247 if (!ctx->keylen) {
248 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
249 memcpy(zero_hash, &zero_message_hmac_sha1[0],
250 SHA1_DIGEST_SIZE);
251 *zero_hash_size = SHA1_DIGEST_SIZE;
252 *zero_digest = true;
253 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
254 memcpy(zero_hash, &zero_message_hmac_sha256[0],
255 SHA256_DIGEST_SIZE);
256 *zero_hash_size = SHA256_DIGEST_SIZE;
257 *zero_digest = true;
258 } else {
259 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
260 __func__);
261 ret = -EINVAL;
262 goto out;
263 }
264 } else {
265 dev_dbg(device_data->dev,
266 "%s: Continue hash calculation, since hmac key available\n",
267 __func__);
268 }
269 }
270out:
271
272 return ret;
273}
274
275
276
277
278
279
280
281
282
/**
 * hash_disable_power - Power the hash hardware down.
 * @device_data:	Structure for the hash device.
 * @save_device_state:	Save the hardware registers before powering off so
 *			they can be restored by hash_enable_power().
 *
 * Returns 0, or the error from regulator_disable().
 */
static int hash_disable_power(struct hash_device_data *device_data,
			      bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	/* Already off: nothing to do. */
	if (!device_data->power_state)
		goto out;

	/* Registers are lost across power-off; stash them first. */
	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	/* Gate the clock before cutting the regulator. */
	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}
311
312
313
314
315
316
317
318
319
320static int hash_enable_power(struct hash_device_data *device_data,
321 bool restore_device_state)
322{
323 int ret = 0;
324 struct device *dev = device_data->dev;
325
326 spin_lock(&device_data->power_state_lock);
327 if (!device_data->power_state) {
328 ret = regulator_enable(device_data->regulator);
329 if (ret) {
330 dev_err(dev, "%s: regulator_enable() failed!\n",
331 __func__);
332 goto out;
333 }
334 ret = clk_enable(device_data->clk);
335 if (ret) {
336 dev_err(dev, "%s: clk_enable() failed!\n", __func__);
337 ret = regulator_disable(
338 device_data->regulator);
339 goto out;
340 }
341 device_data->power_state = true;
342 }
343
344 if (device_data->restore_dev_state) {
345 if (restore_device_state) {
346 device_data->restore_dev_state = false;
347 hash_resume_state(device_data, &device_data->state);
348 }
349 }
350out:
351 spin_unlock(&device_data->power_state_lock);
352
353 return ret;
354}
355
356
357
358
359
360
361
362
363
364
/**
 * hash_get_device_data - Acquire an idle hash device for @ctx.
 * @ctx:		The crypto context requesting a device.
 * @device_data:	Output; the device claimed for this context.
 *
 * Blocks (interruptibly) on the device-allocation semaphore, then scans
 * the device list and claims the first device with no current context.
 * Returns 0 on success, the down_interruptible() error if interrupted,
 * or -EBUSY if no free device was found.
 */
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device slot is available. */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;

	/* Scan for the first device not owned by any context. */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
					struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);

		if (local_device_data->current_ctx) {
			/* Busy; try the next one. */
			device_node = klist_next(&device_iterator);
		} else {
			/* Claim it while still holding ctx_lock. */
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * The semaphore said a device should be free, yet none was
		 * found — unexpected, report busy to the caller.
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}
414
415
416
417
418
419
420
421
422
423
424
425
/**
 * hash_hw_write_key - Write the HMAC key into the hardware DIN register.
 * @device_data:	Structure for the hash device.
 * @key:		The key bytes to write.
 * @keylen:		Length of @key in bytes.
 *
 * Feeds the key one 32-bit word at a time, packs any 1-3 trailing bytes
 * into a final word, then triggers a digest calculation (DCAL) and waits
 * for the engine to go idle.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	/* Clear NBLW so full 32-bit words are consumed. */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Pack the remaining 1-3 tail bytes into a single word. */
	if (keylen) {
		word = 0;
		while (keylen) {
			/* Byte i of the tail lands in bits [8i, 8i+7]. */
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	/* Wait for any in-flight digest calculation to complete. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
461
462
463
464
465
466
467
468
469
470static int init_hash_hw(struct hash_device_data *device_data,
471 struct hash_ctx *ctx)
472{
473 int ret = 0;
474
475 ret = hash_setconfiguration(device_data, &ctx->config);
476 if (ret) {
477 dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
478 __func__);
479 return ret;
480 }
481
482 hash_begin(device_data, ctx);
483
484 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
485 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
486
487 return ret;
488}
489
490
491
492
493
494
495
496
497
498static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
499{
500 int nents = 0;
501 bool aligned_data = true;
502
503 while (size > 0 && sg) {
504 nents++;
505 size -= sg->length;
506
507
508 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
509 (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
510 aligned_data = false;
511
512 sg = sg_next(sg);
513 }
514
515 if (aligned)
516 *aligned = aligned_data;
517
518 if (size != 0)
519 return -EFAULT;
520
521 return nents;
522}
523
524
525
526
527
528
529
530
531
532static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
533{
534 bool aligned;
535
536
537 if (hash_get_nents(sg, datasize, &aligned) < 1)
538 return false;
539
540 return aligned;
541}
542
543
544
545
546
547
548
549static int hash_init(struct ahash_request *req)
550{
551 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
552 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
553 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
554
555 if (!ctx->key)
556 ctx->keylen = 0;
557
558 memset(&req_ctx->state, 0, sizeof(struct hash_state));
559 req_ctx->updated = 0;
560 if (hash_mode == HASH_MODE_DMA) {
561 if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
562 req_ctx->dma_mode = false;
563
564 pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
565 __func__, HASH_DMA_ALIGN_SIZE);
566 } else {
567 if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
568 hash_dma_valid_data(req->src, req->nbytes)) {
569 req_ctx->dma_mode = true;
570 } else {
571 req_ctx->dma_mode = false;
572 pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
573 __func__,
574 HASH_DMA_PERFORMANCE_MIN_SIZE);
575 }
576 }
577 }
578 return 0;
579}
580
581
582
583
584
585
586
587
588
/**
 * hash_processblock - Push one full block of message words into the engine.
 * @device_data:	Structure for the hash device.
 * @message:	Block of message data (word-aligned).
 * @length:	Block length in bytes.
 */
static void hash_processblock(struct hash_device_data *device_data,
			      const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;

	/*
	 * Clear NBLW so the whole last word is treated as message bits.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write the message through the data input register, DIN.
	 */
	HASH_SET_DIN(message, len);
}
603
604
605
606
607
608
609
610
611
612
613
/**
 * hash_messagepad - Write the final partial block and start the digest.
 * @device_data:	Structure for the hash device.
 * @message:	Trailing message words (less than one block).
 * @index_bytes:	Number of valid bytes in @message.
 *
 * Pushes the remaining words, programs NBLW with the number of valid
 * bits in the last word, triggers DCAL and waits for completion.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Write the full words of the trailing data. */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	/* A last word with 1-3 valid bytes; NBLW (set below) marks them. */
	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* Number of valid bits in the last word written. */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
651
652
653
654
655
656
657
658
659
660static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
661{
662 ctx->state.length.low_word += incr;
663
664
665 if (ctx->state.length.low_word < incr)
666 ctx->state.length.high_word++;
667}
668
669
670
671
672
673
674
/**
 * hash_setconfiguration - Program data format, algorithm and mode bits.
 * @device_data:	Structure for the hash device.
 * @config:	The configuration to apply.
 *
 * Returns 0 on success, -EPERM for an unsupported algorithm or mode.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits: how data written to DIN is interpreted.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit: set selects SHA-1, cleared selects SHA-256 (per the
	 * bit manipulation below).
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		/* Unreachable after the check above, kept defensively. */
		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
			__func__);
		return -EPERM;
	}

	/*
	 * MODE bit: cleared for plain hash, set for HMAC. For HMAC, LKEY
	 * flags a key longer than one block.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Long (> block size) key. */
			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {
		ret = -EPERM;
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
	}
	return ret;
}
735
736
737
738
739
740
741
/**
 * hash_begin - Prepare the hardware for a new message digest.
 * @device_data:	Structure for the hash device.
 * @ctx:		Hash context (not used in the body below).
 *
 * Waits for the engine to go idle, resets the core (INIT) and clears the
 * number-of-valid-bits field.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core
	 * and prepare the HASH accelerator to compute the message digest
	 * of a new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the valid-bits count of the last word.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}
762
/**
 * hash_process_data - CPU-mode processing of message data.
 * @device_data:	Structure for the hash device.
 * @ctx:		The transform context.
 * @req_ctx:	Request context holding the buffered state.
 * @msg_length:	Number of bytes available in @data_buffer.
 * @data_buffer:	Incoming message data.
 * @buffer:	Per-request staging buffer (HASH_BLOCK_SIZE bytes).
 * @index:	In/out number of bytes currently staged in @buffer.
 *
 * Stages partial blocks in @buffer; whenever a full block is available,
 * resumes (or initializes) the hardware, pushes one block, and saves the
 * hardware state again so the device can be shared between requests.
 */
static int hash_process_data(struct hash_device_data *device_data,
			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
			     int msg_length, u8 *data_buffer, u8 *buffer,
			     u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			/* Less than a block: just stage the bytes. */
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
						&device_data->state);
				/*
				 * NOTE(review): ret is only checked after
				 * this memmove — verify ordering intent.
				 */
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE);
				if (ret) {
					dev_err(device_data->dev,
						"%s: hash_resume_state() failed!\n",
						__func__);
					goto out;
				}
			} else {
				/* First block: program the hardware. */
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev,
						"%s: init_hash_hw() failed!\n",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If data_buffer is 4-byte aligned and no bytes are
			 * staged, feed the hardware straight from it;
			 * otherwise complete the staged block first.
			 */
			if ((0 == (((u32)data_buffer) % 4)) &&
			    (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			/* Save state so another request can use the device. */
			ret = hash_save_state(device_data,
					&device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE);
			if (ret) {
				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}
848
849
850
851
852
853static int hash_dma_final(struct ahash_request *req)
854{
855 int ret = 0;
856 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
857 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
858 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
859 struct hash_device_data *device_data;
860 u8 digest[SHA256_DIGEST_SIZE];
861 int bytes_written = 0;
862
863 ret = hash_get_device_data(ctx, &device_data);
864 if (ret)
865 return ret;
866
867 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
868
869 if (req_ctx->updated) {
870 ret = hash_resume_state(device_data, &device_data->state);
871
872 if (ret) {
873 dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
874 __func__);
875 goto out;
876 }
877 }
878
879 if (!req_ctx->updated) {
880 ret = hash_setconfiguration(device_data, &ctx->config);
881 if (ret) {
882 dev_err(device_data->dev,
883 "%s: hash_setconfiguration() failed!\n",
884 __func__);
885 goto out;
886 }
887
888
889 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
890 HASH_CLEAR_BITS(&device_data->base->cr,
891 HASH_CR_DMAE_MASK);
892 } else {
893 HASH_SET_BITS(&device_data->base->cr,
894 HASH_CR_DMAE_MASK);
895 HASH_SET_BITS(&device_data->base->cr,
896 HASH_CR_PRIVN_MASK);
897 }
898
899 HASH_INITIALIZE;
900
901 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
902 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
903
904
905 HASH_SET_NBLW((req->nbytes * 8) % 32);
906 req_ctx->updated = 1;
907 }
908
909
910 ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
911 if (!ctx->device->dma.nents) {
912 dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
913 __func__);
914 ret = ctx->device->dma.nents;
915 goto out;
916 }
917
918 bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
919 if (bytes_written != req->nbytes) {
920 dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
921 __func__);
922 ret = bytes_written;
923 goto out;
924 }
925
926 wait_for_completion(&ctx->device->dma.complete);
927 hash_dma_done(ctx);
928
929 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
930 cpu_relax();
931
932 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
933 unsigned int keylen = ctx->keylen;
934 u8 *key = ctx->key;
935
936 dev_dbg(device_data->dev, "%s: keylen: %d\n",
937 __func__, ctx->keylen);
938 hash_hw_write_key(device_data, key, keylen);
939 }
940
941 hash_get_digest(device_data, digest, ctx->config.algorithm);
942 memcpy(req->result, digest, ctx->digestsize);
943
944out:
945 release_hash_device(device_data);
946
947
948
949
950 kfree(ctx->key);
951
952 return ret;
953}
954
955
956
957
958
959static int hash_hw_final(struct ahash_request *req)
960{
961 int ret = 0;
962 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
963 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
964 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
965 struct hash_device_data *device_data;
966 u8 digest[SHA256_DIGEST_SIZE];
967
968 ret = hash_get_device_data(ctx, &device_data);
969 if (ret)
970 return ret;
971
972 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
973
974 if (req_ctx->updated) {
975 ret = hash_resume_state(device_data, &device_data->state);
976
977 if (ret) {
978 dev_err(device_data->dev,
979 "%s: hash_resume_state() failed!\n", __func__);
980 goto out;
981 }
982 } else if (req->nbytes == 0 && ctx->keylen == 0) {
983 u8 zero_hash[SHA256_DIGEST_SIZE];
984 u32 zero_hash_size = 0;
985 bool zero_digest = false;
986
987
988
989
990 ret = get_empty_message_digest(device_data, &zero_hash[0],
991 &zero_hash_size, &zero_digest);
992 if (!ret && likely(zero_hash_size == ctx->digestsize) &&
993 zero_digest) {
994 memcpy(req->result, &zero_hash[0], ctx->digestsize);
995 goto out;
996 } else if (!ret && !zero_digest) {
997 dev_dbg(device_data->dev,
998 "%s: HMAC zero msg with key, continue...\n",
999 __func__);
1000 } else {
1001 dev_err(device_data->dev,
1002 "%s: ret=%d, or wrong digest size? %s\n",
1003 __func__, ret,
1004 zero_hash_size == ctx->digestsize ?
1005 "true" : "false");
1006
1007 goto out;
1008 }
1009 } else if (req->nbytes == 0 && ctx->keylen > 0) {
1010 dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
1011 __func__);
1012 goto out;
1013 }
1014
1015 if (!req_ctx->updated) {
1016 ret = init_hash_hw(device_data, ctx);
1017 if (ret) {
1018 dev_err(device_data->dev,
1019 "%s: init_hash_hw() failed!\n", __func__);
1020 goto out;
1021 }
1022 }
1023
1024 if (req_ctx->state.index) {
1025 hash_messagepad(device_data, req_ctx->state.buffer,
1026 req_ctx->state.index);
1027 } else {
1028 HASH_SET_DCAL;
1029 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1030 cpu_relax();
1031 }
1032
1033 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
1034 unsigned int keylen = ctx->keylen;
1035 u8 *key = ctx->key;
1036
1037 dev_dbg(device_data->dev, "%s: keylen: %d\n",
1038 __func__, ctx->keylen);
1039 hash_hw_write_key(device_data, key, keylen);
1040 }
1041
1042 hash_get_digest(device_data, digest, ctx->config.algorithm);
1043 memcpy(req->result, digest, ctx->digestsize);
1044
1045out:
1046 release_hash_device(device_data);
1047
1048
1049
1050
1051 kfree(ctx->key);
1052
1053 return ret;
1054}
1055
1056
1057
1058
1059
1060
1061
/**
 * hash_hw_update - CPU-mode update: walk the request and process the data.
 * @req:	The ahash request carrying new message data.
 *
 * Returns 0 on success (including the empty message), -EPERM on message
 * length overflow, or the error from device acquisition/processing.
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata */
	if (msg_length == 0)
		return ret;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	/* Check if state.length + msg_length would overflow the counter. */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
		return -EPERM;
	}

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	/* Main loop: one hash walk step per iteration. */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
				__func__);
			goto out;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
		__func__, req_ctx->state.index, req_ctx->state.bit_index);

out:
	release_hash_device(device_data);

	return ret;
}
1118
1119
1120
1121
1122
1123
1124int hash_resume_state(struct hash_device_data *device_data,
1125 const struct hash_state *device_state)
1126{
1127 u32 temp_cr;
1128 s32 count;
1129 int hash_mode = HASH_OPER_MODE_HASH;
1130
1131 if (NULL == device_state) {
1132 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1133 __func__);
1134 return -EPERM;
1135 }
1136
1137
1138 if (device_state->index > HASH_BLOCK_SIZE ||
1139 (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1140 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1141 __func__);
1142 return -EPERM;
1143 }
1144
1145
1146
1147
1148
1149
1150 HASH_INITIALIZE;
1151
1152 temp_cr = device_state->temp_cr;
1153 writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1154
1155 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1156 hash_mode = HASH_OPER_MODE_HMAC;
1157 else
1158 hash_mode = HASH_OPER_MODE_HASH;
1159
1160 for (count = 0; count < HASH_CSR_COUNT; count++) {
1161 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1162 break;
1163
1164 writel_relaxed(device_state->csr[count],
1165 &device_data->base->csrx[count]);
1166 }
1167
1168 writel_relaxed(device_state->csfull, &device_data->base->csfull);
1169 writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
1170
1171 writel_relaxed(device_state->str_reg, &device_data->base->str);
1172 writel_relaxed(temp_cr, &device_data->base->cr);
1173
1174 return 0;
1175}
1176
1177
1178
1179
1180
1181
1182int hash_save_state(struct hash_device_data *device_data,
1183 struct hash_state *device_state)
1184{
1185 u32 temp_cr;
1186 u32 count;
1187 int hash_mode = HASH_OPER_MODE_HASH;
1188
1189 if (NULL == device_state) {
1190 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1191 __func__);
1192 return -ENOTSUPP;
1193 }
1194
1195
1196
1197
1198
1199 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1200 cpu_relax();
1201
1202 temp_cr = readl_relaxed(&device_data->base->cr);
1203
1204 device_state->str_reg = readl_relaxed(&device_data->base->str);
1205
1206 device_state->din_reg = readl_relaxed(&device_data->base->din);
1207
1208 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1209 hash_mode = HASH_OPER_MODE_HMAC;
1210 else
1211 hash_mode = HASH_OPER_MODE_HASH;
1212
1213 for (count = 0; count < HASH_CSR_COUNT; count++) {
1214 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1215 break;
1216
1217 device_state->csr[count] =
1218 readl_relaxed(&device_data->base->csrx[count]);
1219 }
1220
1221 device_state->csfull = readl_relaxed(&device_data->base->csfull);
1222 device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
1223
1224 device_state->temp_cr = temp_cr;
1225
1226 return 0;
1227}
1228
1229
1230
1231
1232
1233
1234int hash_check_hw(struct hash_device_data *device_data)
1235{
1236
1237 if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
1238 HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
1239 HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
1240 HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
1241 HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
1242 HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
1243 HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
1244 HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
1245 return 0;
1246 }
1247
1248 dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
1249 return -ENOTSUPP;
1250}
1251
1252
1253
1254
1255
1256
1257
1258void hash_get_digest(struct hash_device_data *device_data,
1259 u8 *digest, int algorithm)
1260{
1261 u32 temp_hx_val, count;
1262 int loop_ctr;
1263
1264 if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1265 dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
1266 __func__, algorithm);
1267 return;
1268 }
1269
1270 if (algorithm == HASH_ALGO_SHA1)
1271 loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
1272 else
1273 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1274
1275 dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
1276 __func__, (u32) digest);
1277
1278
1279 for (count = 0; count < loop_ctr; count++) {
1280 temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
1281 digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
1282 digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
1283 digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
1284 digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
1285 }
1286}
1287
1288
1289
1290
1291
1292static int ahash_update(struct ahash_request *req)
1293{
1294 int ret = 0;
1295 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1296
1297 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
1298 ret = hash_hw_update(req);
1299
1300
1301 if (ret) {
1302 pr_err("%s: hash_hw_update() failed!\n", __func__);
1303 }
1304
1305 return ret;
1306}
1307
1308
1309
1310
1311
1312static int ahash_final(struct ahash_request *req)
1313{
1314 int ret = 0;
1315 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1316
1317 pr_debug("%s: data size: %d\n", __func__, req->nbytes);
1318
1319 if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1320 ret = hash_dma_final(req);
1321 else
1322 ret = hash_hw_final(req);
1323
1324 if (ret) {
1325 pr_err("%s: hash_hw/dma_final() failed\n", __func__);
1326 }
1327
1328 return ret;
1329}
1330
/**
 * hash_setkey - Common key-setting helper for the HMAC transforms.
 * @tfm:	The transform.
 * @key:	The key bytes from the caller.
 * @keylen:	Length of @key in bytes.
 * @alg:	HASH_ALGO_SHA1/SHA256; used here only for the error message.
 *
 * NOTE(review): the copy is freed in the final functions; calling setkey
 * twice without an intervening final appears to leak the first copy —
 * verify against the crypto API usage pattern.
 */
static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Take a private copy of the key; freed in final. */
	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err("%s: Failed to allocate ctx->key for %d\n",
		       __func__, alg);
		return -ENOMEM;
	}
	ctx->keylen = keylen;

	return ret;
}
1350
1351static int ahash_sha1_init(struct ahash_request *req)
1352{
1353 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1354 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1355
1356 ctx->config.data_format = HASH_DATA_8_BITS;
1357 ctx->config.algorithm = HASH_ALGO_SHA1;
1358 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1359 ctx->digestsize = SHA1_DIGEST_SIZE;
1360
1361 return hash_init(req);
1362}
1363
1364static int ahash_sha256_init(struct ahash_request *req)
1365{
1366 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1367 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1368
1369 ctx->config.data_format = HASH_DATA_8_BITS;
1370 ctx->config.algorithm = HASH_ALGO_SHA256;
1371 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1372 ctx->digestsize = SHA256_DIGEST_SIZE;
1373
1374 return hash_init(req);
1375}
1376
/* One-shot SHA-1 digest: init, update, final. */
static int ahash_sha1_digest(struct ahash_request *req)
{
	int update_ret, final_ret;

	update_ret = ahash_sha1_init(req);
	if (update_ret)
		return update_ret;

	update_ret = ahash_update(req);
	/* final is invoked even if update failed, as before. */
	final_ret = ahash_final(req);

	return update_ret ? update_ret : final_ret;
}
1391
/* One-shot SHA-256 digest: init, update, final. */
static int ahash_sha256_digest(struct ahash_request *req)
{
	int update_ret, final_ret;

	update_ret = ahash_sha256_init(req);
	if (update_ret)
		return update_ret;

	update_ret = ahash_update(req);
	/* final is invoked even if update failed, as before. */
	final_ret = ahash_final(req);

	return update_ret ? update_ret : final_ret;
}
1406
/* Importing partial hash state is not supported by this driver. */
static int ahash_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}
1411
/* Exporting partial hash state is not supported by this driver. */
static int ahash_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}
1416
1417static int hmac_sha1_init(struct ahash_request *req)
1418{
1419 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1420 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1421
1422 ctx->config.data_format = HASH_DATA_8_BITS;
1423 ctx->config.algorithm = HASH_ALGO_SHA1;
1424 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1425 ctx->digestsize = SHA1_DIGEST_SIZE;
1426
1427 return hash_init(req);
1428}
1429
1430static int hmac_sha256_init(struct ahash_request *req)
1431{
1432 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1433 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1434
1435 ctx->config.data_format = HASH_DATA_8_BITS;
1436 ctx->config.algorithm = HASH_ALGO_SHA256;
1437 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1438 ctx->digestsize = SHA256_DIGEST_SIZE;
1439
1440 return hash_init(req);
1441}
1442
/*
 * One-shot HMAC-SHA1 digest: init + update + final in a single call.
 * final is deliberately run even when update fails (to finish the
 * request), but an update error takes precedence in the return value.
 */
static int hmac_sha1_digest(struct ahash_request *req)
{
	int update_ret, final_ret;

	update_ret = hmac_sha1_init(req);
	if (update_ret)
		return update_ret;

	update_ret = ahash_update(req);
	final_ret = ahash_final(req);

	return update_ret ? update_ret : final_ret;
}
1457
/*
 * One-shot HMAC-SHA256 digest: init + update + final in a single call.
 * final is deliberately run even when update fails (to finish the
 * request), but an update error takes precedence in the return value.
 */
static int hmac_sha256_digest(struct ahash_request *req)
{
	int update_ret, final_ret;

	update_ret = hmac_sha256_init(req);
	if (update_ret)
		return update_ret;

	update_ret = ahash_update(req);
	final_ret = ahash_final(req);

	return update_ret ? update_ret : final_ret;
}
1472
/* crypto_ahash setkey entry point for hmac(sha1). */
static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}
1478
/* crypto_ahash setkey entry point for hmac(sha256). */
static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
1484
/**
 * struct hash_algo_template - ahash algorithm wrapper.
 * @conf: Hardware configuration (algorithm and HASH vs HMAC mode)
 *        copied into each transform context at cra_init time.
 * @hash: The ahash algorithm registered with the crypto core.
 */
struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};
1489
1490static int hash_cra_init(struct crypto_tfm *tfm)
1491{
1492 struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
1493 struct crypto_alg *alg = tfm->__crt_alg;
1494 struct hash_algo_template *hash_alg;
1495
1496 hash_alg = container_of(__crypto_ahash_alg(alg),
1497 struct hash_algo_template,
1498 hash);
1499
1500 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1501 sizeof(struct hash_req_ctx));
1502
1503 ctx->config.data_format = HASH_DATA_8_BITS;
1504 ctx->config.algorithm = hash_alg->conf.algorithm;
1505 ctx->config.oper_mode = hash_alg->conf.oper_mode;
1506
1507 ctx->digestsize = hash_alg->hash.halg.digestsize;
1508
1509 return 0;
1510}
1511
/*
 * The algorithms this driver exposes to the crypto core: plain SHA-1
 * and SHA-256 plus their HMAC variants. All are registered as async
 * (CRYPTO_ALG_ASYNC) hardware-backed implementations; export/import of
 * partial state is not supported (see ahash_noexport/ahash_noimport).
 */
static struct hash_algo_template hash_algs[] = {
	/* Plain SHA-1. */
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* Plain SHA-256. */
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* HMAC-SHA1 (keyed, see hmac_sha1_setkey). */
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* HMAC-SHA256 (keyed, see hmac_sha256_setkey). */
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
1608
1609
1610
1611
1612static int ahash_algs_register_all(struct hash_device_data *device_data)
1613{
1614 int ret;
1615 int i;
1616 int count;
1617
1618 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
1619 ret = crypto_register_ahash(&hash_algs[i].hash);
1620 if (ret) {
1621 count = i;
1622 dev_err(device_data->dev, "%s: alg registration failed\n",
1623 hash_algs[i].hash.halg.base.cra_driver_name);
1624 goto unreg;
1625 }
1626 }
1627 return 0;
1628unreg:
1629 for (i = 0; i < count; i++)
1630 crypto_unregister_ahash(&hash_algs[i].hash);
1631 return ret;
1632}
1633
1634
1635
1636
1637static void ahash_algs_unregister_all(struct hash_device_data *device_data)
1638{
1639 int i;
1640
1641 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
1642 crypto_unregister_ahash(&hash_algs[i].hash);
1643}
1644
1645
1646
1647
1648
1649static int ux500_hash_probe(struct platform_device *pdev)
1650{
1651 int ret = 0;
1652 struct resource *res = NULL;
1653 struct hash_device_data *device_data;
1654 struct device *dev = &pdev->dev;
1655
1656 device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1657 if (!device_data) {
1658 ret = -ENOMEM;
1659 goto out;
1660 }
1661
1662 device_data->dev = dev;
1663 device_data->current_ctx = NULL;
1664
1665 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1666 if (!res) {
1667 dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
1668 ret = -ENODEV;
1669 goto out;
1670 }
1671
1672 device_data->phybase = res->start;
1673 device_data->base = devm_ioremap_resource(dev, res);
1674 if (IS_ERR(device_data->base)) {
1675 dev_err(dev, "%s: ioremap() failed!\n", __func__);
1676 ret = PTR_ERR(device_data->base);
1677 goto out;
1678 }
1679 spin_lock_init(&device_data->ctx_lock);
1680 spin_lock_init(&device_data->power_state_lock);
1681
1682
1683 device_data->regulator = regulator_get(dev, "v-ape");
1684 if (IS_ERR(device_data->regulator)) {
1685 dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1686 ret = PTR_ERR(device_data->regulator);
1687 device_data->regulator = NULL;
1688 goto out;
1689 }
1690
1691
1692 device_data->clk = devm_clk_get(dev, NULL);
1693 if (IS_ERR(device_data->clk)) {
1694 dev_err(dev, "%s: clk_get() failed!\n", __func__);
1695 ret = PTR_ERR(device_data->clk);
1696 goto out_regulator;
1697 }
1698
1699 ret = clk_prepare(device_data->clk);
1700 if (ret) {
1701 dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1702 goto out_regulator;
1703 }
1704
1705
1706 ret = hash_enable_power(device_data, false);
1707 if (ret) {
1708 dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1709 goto out_clk_unprepare;
1710 }
1711
1712 ret = hash_check_hw(device_data);
1713 if (ret) {
1714 dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
1715 goto out_power;
1716 }
1717
1718 if (hash_mode == HASH_MODE_DMA)
1719 hash_dma_setup_channel(device_data, dev);
1720
1721 platform_set_drvdata(pdev, device_data);
1722
1723
1724 klist_add_tail(&device_data->list_node, &driver_data.device_list);
1725
1726 up(&driver_data.device_allocation);
1727
1728 ret = ahash_algs_register_all(device_data);
1729 if (ret) {
1730 dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
1731 __func__);
1732 goto out_power;
1733 }
1734
1735 dev_info(dev, "successfully registered\n");
1736 return 0;
1737
1738out_power:
1739 hash_disable_power(device_data, false);
1740
1741out_clk_unprepare:
1742 clk_unprepare(device_data->clk);
1743
1744out_regulator:
1745 regulator_put(device_data->regulator);
1746
1747out:
1748 return ret;
1749}
1750
1751
1752
1753
1754
/**
 * ux500_hash_remove - Unbind the driver from a hash device.
 * @pdev: The platform device.
 *
 * Refuses to unbind (-EBUSY) while a transform still owns the device.
 * On the last device, the algorithms are unregistered from the crypto
 * core before power, clock and regulator are released.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* Try to claim the device from the pool; failure means it is in use. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Double-check under the lock that no context owns the device. */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx != NULL means the device is allocated. */
	if (device_data->current_ctx) {
		/* The device is busy. */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool before bailing out. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the driver's device list. */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, withdraw the algorithms. */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n",
			__func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->regulator);

	return 0;
}
1800
1801
1802
1803
1804
/**
 * ux500_hash_shutdown - Stop the hash device at system shutdown.
 * @pdev: The platform device.
 *
 * Unlike remove, this never fails: the device is claimed even if it is
 * still in use, since the system is going down anyway.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check whether the device is currently free. */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx != NULL means the device is allocated. */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device.)
		 * Bump current_ctx to a non-NULL dummy value so no new
		 * work grabs the device while we shut it down.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the driver's device list. */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, withdraw the algorithms. */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}
1844
1845#ifdef CONFIG_PM_SLEEP
1846
1847
1848
1849
/**
 * ux500_hash_suspend - Prepare the hash device for system sleep.
 * @dev: The hash device.
 *
 * If the device is idle it is marked busy with a dummy context and
 * powered down without saving state; otherwise the hardware state is
 * preserved across suspend (hash_disable_power(..., true)).
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/*
	 * If no context owns the device, plant a dummy non-NULL marker
	 * (NULL + 1) so no new work is accepted during suspend.
	 */
	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * ++temp_ctx recreates the same NULL + 1 marker; a match means
	 * the device was idle, so take the allocation semaphore and
	 * power down without saving state. Otherwise a real context is
	 * active and the hardware state must be saved.
	 * NOTE(review): arithmetic on a NULL pointer is technically UB,
	 * but it mirrors the marker written above - kept as-is.
	 */
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);

	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power()\n", __func__);

	return ret;
}
1882
1883
1884
1885
1886
/**
 * ux500_hash_resume - Restore the hash device after system sleep.
 * @dev: The hash device.
 *
 * Mirrors ux500_hash_suspend(): clears the dummy busy marker and
 * releases the allocation semaphore if the device was idle, otherwise
 * re-enables power and restores the saved hardware state.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/*
	 * ++temp_ctx is the same NULL + 1 dummy marker suspend planted;
	 * a match means the device was idle at suspend - clear the
	 * marker so it is allocatable again.
	 */
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/* Idle device: return it to the pool. Busy: restore its state. */
	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
1914#endif
1915
/* System sleep hooks; expand to no-ops when CONFIG_PM_SLEEP is unset. */
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
1917
/* Device-tree match table: binds this driver to the ux500 hash node. */
static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);
1923
/* Platform driver glue for the HASH1 block. */
static struct platform_driver hash_driver = {
	.probe  = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name  = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};
1934
1935
1936
1937
1938static int __init ux500_hash_mod_init(void)
1939{
1940 klist_init(&driver_data.device_list, NULL, NULL);
1941
1942 sema_init(&driver_data.device_allocation, 0);
1943
1944 return platform_driver_register(&hash_driver);
1945}
1946
1947
1948
1949
1950static void __exit ux500_hash_mod_fini(void)
1951{
1952 platform_driver_unregister(&hash_driver);
1953}
1954
module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

/* Aliases so the crypto API can autoload this module by algorithm name. */
MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");
1965