/*
 * Cryptographic API.
 * Support for the ST-Ericsson UX500 HASH hardware engine (SHA-1, SHA-256
 * and their HMAC variants), using either CPU or DMA data transfers.
 */

#define pr_fmt(fmt) "hashX hashX: " fmt

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>

#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <linux/platform_data/crypto-ux500.h>

#include "hash_alg.h"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");

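/*
 * Precalculated HMAC digests of the zero-length message, returned directly
 * when an HMAC request has no data and a zero-length key.
 */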
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};

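/**
 * struct hash_driver_data - Data common to all hash devices.
 * @device_list:	Klist of probed hash device instances.
 * @device_allocation:	Semaphore used to allocate a device to a context.
 */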
struct hash_driver_data {
	struct klist		device_list;
	struct semaphore	device_allocation;
};

static struct hash_driver_data	driver_data;

static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);

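/**
 * release_hash_device - Releases the device from its current context and
 * makes it available for allocation again.
 * @device_data:	Structure for the hash device.
 */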
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The device is no longer claimed by any context, so signal that it
	 * is available for allocation again.
	 */
	up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
				   struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;
	struct dma_slave_config conf = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst = 16,
	};

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				    platform_data->dma_filter,
				    device_data->dma.cfg_mem2hash);

	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

	init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
	struct hash_ctx *ctx = data;

	complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
			__func__);
		return -EFAULT;
	}

	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
					     ctx->device->dma.sg,
					     ctx->device->dma.nents,
					     direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
		__func__);
	desc = dmaengine_prep_slave_sg(channel,
				       ctx->device->dma.sg,
				       ctx->device->dma.sg_len,
				       DMA_MEM_TO_DEV,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(ctx->device->dev,
			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
		return -EFAULT;
	}

	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;

	dmaengine_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}

static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	dmaengine_terminate_all(chan);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
		     ctx->device->dma.nents, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
			  struct scatterlist *sg, int len)
{
	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);

	if (error) {
		dev_dbg(ctx->device->dev,
			"%s: hash_set_dma_transfer() failed\n", __func__);
		return error;
	}

	return len;
}

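/**
 * get_empty_message_digest - Returns a precalculated digest for the empty
 * (zero-length) message.
 * @device_data:	Structure for the hash device.
 * @zero_hash:		Buffer that receives the digest.
 * @zero_hash_size:	Size of the returned digest, in bytes.
 * @zero_digest:	Set to true if a precalculated digest was available.
 */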
static int get_empty_message_digest(
		struct hash_device_data *device_data,
		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
	int ret = 0;
	struct hash_ctx *ctx = device_data->current_ctx;
	*zero_digest = false;

	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
			memcpy(zero_hash, &sha1_zero_message_hash[0],
			       SHA1_DIGEST_SIZE);
			*zero_hash_size = SHA1_DIGEST_SIZE;
			*zero_digest = true;
		} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
			memcpy(zero_hash, &sha256_zero_message_hash[0],
			       SHA256_DIGEST_SIZE);
			*zero_hash_size = SHA256_DIGEST_SIZE;
			*zero_digest = true;
		} else {
			dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
				__func__);
			ret = -EINVAL;
			goto out;
		}
	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
		if (!ctx->keylen) {
			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha1[0],
				       SHA1_DIGEST_SIZE);
				*zero_hash_size = SHA1_DIGEST_SIZE;
				*zero_digest = true;
			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha256[0],
				       SHA256_DIGEST_SIZE);
				*zero_hash_size = SHA256_DIGEST_SIZE;
				*zero_digest = true;
			} else {
				dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
					__func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_dbg(device_data->dev,
				"%s: Continue hash calculation, since hmac key available\n",
				__func__);
		}
	}
out:
	return ret;
}

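/**
 * hash_disable_power - Disables the clock and regulator of the hash device.
 * @device_data:	Structure for the hash device.
 * @save_device_state:	If true, save the hardware registers before powering
 *			down so they can be restored later.
 */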
static int hash_disable_power(struct hash_device_data *device_data,
			      bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data, &device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

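/**
 * hash_enable_power - Enables the regulator and clock of the hash device.
 * @device_data:		Structure for the hash device.
 * @restore_device_state:	If true, restore a previously saved register
 *				state after powering up.
 */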
static int hash_enable_power(struct hash_device_data *device_data,
			     bool restore_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->regulator);
		if (ret) {
			dev_err(dev, "%s: regulator_enable() failed!\n",
				__func__);
			goto out;
		}
		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "%s: clk_enable() failed!\n", __func__);
			ret = regulator_disable(device_data->regulator);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_state) {
		if (restore_device_state) {
			device_data->restore_dev_state = false;
			hash_resume_state(device_data, &device_data->state);
		}
	}
out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

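/**
 * hash_get_device_data - Claims a free hash device for the given context.
 * @ctx:		The hash context requesting a device.
 * @device_data:	Returns the claimed device on success.
 *
 * Waits on the allocation semaphore, then walks the device list looking for
 * a device without a current context. Returns 0 on success, -EBUSY if no
 * free device was found, or the error from down_interruptible().
 */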
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available. */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;

	/* Select a device. */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
						 struct hash_device_data,
						 list_node);
		spin_lock(&local_device_data->ctx_lock);

		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found, even though the allocation semaphore
		 * was acquired. Play it safe and report busy to the caller.
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}

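/**
 * hash_hw_write_key - Writes the HMAC key to the HASH_DIN register.
 * @device_data:	Structure for the hash device.
 * @key:		Pointer to the key.
 * @keylen:		Length of the key, in bytes.
 */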
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word. */
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}

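/**
 * init_hash_hw - Initialises the hash hardware for a new calculation.
 * @device_data:	Structure for the hash device.
 * @ctx:		The hash context.
 *
 * Writes the configuration, initialises the digest computation and, in HMAC
 * mode, writes the key.
 */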
static int init_hash_hw(struct hash_device_data *device_data,
			struct hash_ctx *ctx)
{
	int ret = 0;

	ret = hash_setconfiguration(device_data, &ctx->config);
	if (ret) {
		dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
			__func__);
		return ret;
	}

	hash_begin(device_data, ctx);

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
		hash_hw_write_key(device_data, ctx->key, ctx->keylen);

	return ret;
}

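/**
 * hash_get_nents - Returns the number of entries (nents) in the scatterlist.
 * @sg:		Scatterlist.
 * @size:	Size in bytes.
 * @aligned:	If non-NULL, set to true when every entry is DMA-aligned.
 */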
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
	int nents = 0;
	bool aligned_data = true;

	while (size > 0 && sg) {
		nents++;
		size -= sg->length;

		/* Only the last entry may have an unaligned length. */
		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
		    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
			aligned_data = false;

		sg = sg_next(sg);
	}

	if (aligned)
		*aligned = aligned_data;

	if (size != 0)
		return -EFAULT;

	return nents;
}

static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
	bool aligned;

	if (hash_get_nents(sg, datasize, &aligned) < 1)
		return false;

	return aligned;
}

static int ux500_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!ctx->key)
		ctx->keylen = 0;

	memset(&req_ctx->state, 0, sizeof(struct hash_state));
	req_ctx->updated = 0;
	if (hash_mode == HASH_MODE_DMA) {
		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
			req_ctx->dma_mode = false;

			pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
				 __func__, HASH_DMA_ALIGN_SIZE);
		} else {
			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
			    hash_dma_valid_data(req->src, req->nbytes)) {
				req_ctx->dma_mode = true;
			} else {
				req_ctx->dma_mode = false;
				pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
					 __func__,
					 HASH_DMA_PERFORMANCE_MIN_SIZE);
			}
		}
	}
	return 0;
}

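/**
 * hash_processblock - Writes one full block of message data to the hardware.
 * @device_data:	Structure for the hash device.
 * @message:		One block of message data.
 * @length:		Block length, in bytes.
 */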
static void hash_processblock(struct hash_device_data *device_data,
			      const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;

	/* Reset the number of bits in the last word (NBLW). */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Write the message data to the HASH_DIN register. */
	HASH_SET_DIN(message, len);
}

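/**
 * hash_messagepad - Writes the remaining message data and starts the final
 * digest calculation.
 * @device_data:	Structure for the hash device.
 * @message:		The remaining (less than one block) message data.
 * @index_bytes:	Number of bytes left in @message.
 */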
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear the NBLW bits only; DCAL is reset by the hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Write the remaining full words. */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* Number of bits in the last word. */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}

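/**
 * hash_incrementlength - Increments the length of the processed message.
 * @ctx:	Hash request context.
 * @incr:	Number of bytes just processed.
 */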
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
	ctx->state.length.low_word += incr;

	/* Check for wrap-around of the low word. */
	if (ctx->state.length.low_word < incr)
		ctx->state.length.high_word++;
}

int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits: format of the data written to HASH_DIN.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit: selects between SHA-1 and SHA-256.
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
			__func__);
		return -EPERM;
	}

	/*
	 * MODE bit: selects between HASH and HMAC mode for the chosen
	 * algorithm.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Key is longer than the block size: set LKEY. */
			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {
		ret = -EPERM;
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
	}
	return ret;
}

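/**
 * hash_begin - Resets the hash hardware for a new digest calculation.
 * @device_data:	Structure for the hash device.
 * @ctx:		Hash context.
 */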
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* Wait for any ongoing digest calculation to complete. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit: resets the HASH processor core and prepares it to
	 * compute the digest of a new message.
	 */
	HASH_INITIALIZE;

	/* Reset the number of bits in the last word (NBLW). */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

static int hash_process_data(struct hash_device_data *device_data,
			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
			     int msg_length, u8 *data_buffer, u8 *buffer,
			     u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
							&device_data->state);
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE);
				if (ret) {
					dev_err(device_data->dev,
						"%s: hash_resume_state() failed!\n",
						__func__);
					goto out;
				}
			} else {
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev,
						"%s: init_hash_hw() failed!\n",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and the local
			 * buffer does not hold any data, the block can be
			 * written directly from 'data_buffer' to the device,
			 * avoiding an intermediate copy.
			 */
			if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
			    (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			ret = hash_save_state(device_data,
					      &device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE);
			if (ret) {
				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:
	return ret;
}

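/**
 * hash_dma_final - The hash final function when data is transferred by DMA.
 * @req:	The hash request for the job.
 */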
static int hash_dma_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];
	int bytes_written = 0;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
		(unsigned long)ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
				__func__);
			goto out;
		}
	}

	if (!req_ctx->updated) {
		ret = hash_setconfiguration(device_data, &ctx->config);
		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_setconfiguration() failed!\n",
				__func__);
			goto out;
		}

		/* Select between register (CPU) and DMA transfer mode. */
		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
		} else {
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_DMAE_MASK);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_PRIVN_MASK);
		}

		HASH_INITIALIZE;

		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
			hash_hw_write_key(device_data, ctx->key, ctx->keylen);

		/* Number of bits in last word = (nbytes * 8) % 32. */
		HASH_SET_NBLW((req->nbytes * 8) % 32);
		req_ctx->updated = 1;
	}

	/* Store the nents in the dma struct. */
	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
	if (!ctx->device->dma.nents) {
		dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
			__func__);
		ret = ctx->device->dma.nents;
		goto out;
	}

	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
	if (bytes_written != req->nbytes) {
		dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
			__func__);
		ret = bytes_written;
		goto out;
	}

	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

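/**
 * hash_hw_final - The hash final function when data is written by the CPU.
 * @req:	The hash request for the job.
 */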
static int hash_hw_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
		(unsigned long)ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_resume_state() failed!\n", __func__);
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen == 0) {
		u8 zero_hash[SHA256_DIGEST_SIZE];
		u32 zero_hash_size = 0;
		bool zero_digest = false;

		/*
		 * Use a precalculated empty-message digest instead of running
		 * the zero-length message through the hardware.
		 */
		ret = get_empty_message_digest(device_data, &zero_hash[0],
					       &zero_hash_size, &zero_digest);
		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
		    zero_digest) {
			memcpy(req->result, &zero_hash[0], ctx->digestsize);
			goto out;
		} else if (!ret && !zero_digest) {
			dev_dbg(device_data->dev,
				"%s: HMAC zero msg with key, continue...\n",
				__func__);
		} else {
			dev_err(device_data->dev,
				"%s: ret=%d, or wrong digest size? %s\n",
				__func__, ret,
				zero_hash_size == ctx->digestsize ?
				"true" : "false");
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen > 0) {
		ret = -EPERM;
		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
			__func__);
		goto out;
	}

	if (!req_ctx->updated) {
		ret = init_hash_hw(device_data, ctx);
		if (ret) {
			dev_err(device_data->dev,
				"%s: init_hash_hw() failed!\n", __func__);
			goto out;
		}
	}

	if (req_ctx->state.index) {
		hash_messagepad(device_data, req_ctx->state.buffer,
				req_ctx->state.index);
	} else {
		HASH_SET_DCAL;
		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
			cpu_relax();
	}

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

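/**
 * hash_hw_update - Updates the hash computation with new message data.
 * @req:	The hash request for the job.
 */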
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata. */
	if (msg_length == 0) {
		ret = 0;
		goto release_dev;
	}

	/* Check if ctx->state.length + msg_length overflows. */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
		ret = crypto_hash_walk_done(&walk, -EPERM);
		goto release_dev;
	}

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
				__func__);
			crypto_hash_walk_done(&walk, ret);
			goto release_dev;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
		__func__, req_ctx->state.index, req_ctx->state.bit_index);

release_dev:
	release_hash_device(device_data);

	return ret;
}

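/**
 * hash_resume_state - Restores a previously saved hardware state.
 * @device_data:	Pointer to the device structure.
 * @device_state:	The state to be restored in the hash hardware.
 */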
int hash_resume_state(struct hash_device_data *device_data,
		      const struct hash_state *device_state)
{
	u32 temp_cr;
	s32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/* Check correctness of index and length members. */
	if (device_state->index > HASH_BLOCK_SIZE ||
	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/*
	 * INIT bit: resets the HASH processor core and prepares it to
	 * compute the digest of a new message.
	 */
	HASH_INITIALIZE;

	temp_cr = device_state->temp_cr;
	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		writel_relaxed(device_state->csr[count],
			       &device_data->base->csrx[count]);
	}

	writel_relaxed(device_state->csfull, &device_data->base->csfull);
	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

	writel_relaxed(device_state->str_reg, &device_data->base->str);
	writel_relaxed(temp_cr, &device_data->base->cr);

	return 0;
}

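/**
 * hash_save_state - Saves the current hardware state.
 * @device_data:	Pointer to the device structure.
 * @device_state:	Structure that receives the saved state.
 */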
int hash_save_state(struct hash_device_data *device_data,
		    struct hash_state *device_state)
{
	u32 temp_cr;
	u32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -ENOTSUPP;
	}

	/*
	 * Make sure the device is idle (no ongoing digest calculation)
	 * before saving the state.
	 */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	temp_cr = readl_relaxed(&device_data->base->cr);

	device_state->str_reg = readl_relaxed(&device_data->base->str);

	device_state->din_reg = readl_relaxed(&device_data->base->din);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		device_state->csr[count] =
			readl_relaxed(&device_data->base->csrx[count]);
	}

	device_state->csfull = readl_relaxed(&device_data->base->csfull);
	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

	device_state->temp_cr = temp_cr;

	return 0;
}

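/**
 * hash_check_hw - Checks that the expected hash hardware is present by
 * reading the peripheral and cell IDs.
 * @device_data:	Pointer to the device structure.
 */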
int hash_check_hw(struct hash_device_data *device_data)
{
	/* Check the peripheral and cell IDs. */
	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
	    HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
	    HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
	    HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
	    HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
	    HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
	    HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
	    HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
		return 0;
	}

	dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
	return -ENOTSUPP;
}

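/**
 * hash_get_digest - Reads the calculated digest from the hardware.
 * @device_data:	Pointer to the device structure.
 * @digest:		Caller-allocated buffer for the digest.
 * @algorithm:		The algorithm in use (SHA-1 or SHA-256).
 */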
void hash_get_digest(struct hash_device_data *device_data,
		     u8 *digest, int algorithm)
{
	u32 temp_hx_val, count;
	int loop_ctr;

	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
		dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
			__func__, algorithm);
		return;
	}

	if (algorithm == HASH_ALGO_SHA1)
		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
	else
		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

	dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
		__func__, (unsigned long)digest);

	/* Copy the result into the digest array. */
	for (count = 0; count < loop_ctr; count++) {
		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
	}
}

static int ahash_update(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
		ret = hash_hw_update(req);
	/* Skip update for DMA; all data is passed to the DMA engine in final. */

	if (ret) {
		pr_err("%s: hash_hw_update() failed!\n", __func__);
	}

	return ret;
}

static int ahash_final(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	pr_debug("%s: data size: %d\n", __func__, req->nbytes);

	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
		ret = hash_dma_final(req);
	else
		ret = hash_hw_final(req);

	if (ret) {
		pr_err("%s: hash_hw/dma_final() failed\n", __func__);
	}

	return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/*
	 * Freed in final.
	 */
	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err("%s: Failed to allocate ctx->key for %d\n",
		       __func__, alg);
		return -ENOMEM;
	}
	ctx->keylen = keylen;

	return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int ahash_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int hmac_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}

struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hash_algo_template *hash_alg;

	hash_alg = container_of(__crypto_ahash_alg(alg),
				struct hash_algo_template,
				hash);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct hash_req_ctx));

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = hash_alg->conf.algorithm;
	ctx->config.oper_mode = hash_alg->conf.oper_mode;

	ctx->digestsize = hash_alg->hash.halg.digestsize;

	return 0;
}

static struct hash_algo_template hash_algs[] = {
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "%s: alg registration failed\n",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}

static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}

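/**
 * ux500_hash_probe - Probes and initialises the hash hardware.
 * @pdev:	The platform device.
 */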
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
	if (!device_data) {
		ret = -ENOMEM;
		goto out;
	}

	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	device_data->phybase = res->start;
	device_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(device_data->base)) {
		ret = PTR_ERR(device_data->base);
		goto out;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Get the regulator powering the HASH1 hardware block. */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out;
	}

	/* Get the clock for the HASH1 hardware block. */
	device_data->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "%s: clk_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
		goto out_regulator;
	}

	/* Enable device power (and clock). */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
		goto out_clk_unprepare;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");
	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out:
	return ret;
}

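/**
 * ux500_hash_remove - Removes the hash device from the platform.
 * @pdev:	The platform device.
 */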
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free. */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated. */
	if (device_data->current_ctx) {
		/* The device is busy. */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list. */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services. */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n",
			__func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->regulator);

	return 0;
}

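/**
 * ux500_hash_shutdown - Shuts down the hash device.
 * @pdev:	The platform device.
 */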
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free. */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated. */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * Set current_ctx to a non-NULL (dummy) value to mark the
		 * device as allocated, so it is not handed out again.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list. */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services. */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}

#ifdef CONFIG_PM_SLEEP

static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);
	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power()\n", __func__);

	return ret;
}

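/**
 * ux500_hash_resume - Resumes the hash device after system suspend.
 * @dev:	Device to resume.
 */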
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);

static struct platform_driver hash_driver = {
	.probe = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};

static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);

	/* The semaphore starts at 0; probe() ups it once per registered device. */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}

static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");