1
2
3
4
5
6
7
8
9
10
11
12
13
14#define pr_fmt(fmt) "hashX hashX: " fmt
15
16#include <linux/clk.h>
17#include <linux/device.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/klist.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/mod_devicetable.h>
25#include <linux/platform_device.h>
26#include <linux/crypto.h>
27
28#include <linux/regulator/consumer.h>
29#include <linux/dmaengine.h>
30#include <linux/bitops.h>
31
32#include <crypto/internal/hash.h>
33#include <crypto/sha.h>
34#include <crypto/scatterwalk.h>
35#include <crypto/algapi.h>
36
37#include <linux/platform_data/crypto-ux500.h>
38
39#include "hash_alg.h"
40
/* Module parameter: data-transfer mode, 0 = CPU (default), 1 = DMA. */
static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
44
45
/*
 * Precomputed HMAC-SHA1 of the empty message with a zero-length key;
 * returned directly by get_empty_message_digest() when keylen == 0.
 */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};
51
52
/*
 * Precomputed HMAC-SHA256 of the empty message with a zero-length key;
 * returned directly by get_empty_message_digest() when keylen == 0.
 */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
59
60
61
62
63
64
65
/**
 * struct hash_driver_data - driver-wide bookkeeping shared by all devices.
 * @device_list:       klist of registered hash devices, walked by
 *                     hash_get_device_data().
 * @device_allocation: counting semaphore of available (unclaimed) devices;
 *                     down'ed on allocation, up'ed in release_hash_device().
 */
struct hash_driver_data {
	struct klist device_list;
	struct semaphore device_allocation;
};

/* Single driver-wide instance. */
static struct hash_driver_data driver_data;
72
73
74
75
76
77
78
79
80
81
82
83
/* Forward declaration: pushes the final partial words and pads (see below). */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);
86
87
88
89
90
91
/*
 * release_hash_device - Detach the current context from @device_data and
 * signal that one device is available again.
 *
 * Clears the ctx<->device links under ctx_lock, then up()s the allocation
 * semaphore so a waiter in hash_get_device_data() can proceed.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/* Wake up the next thread waiting for a device. */
	up(&driver_data.device_allocation);
}
105
/*
 * hash_dma_setup_channel - Request and configure the mem-to-hash DMA channel.
 * @device_data: Hash device.
 * @dev:         Platform device carrying hash_platform_data.
 *
 * Requests a DMA_SLAVE channel using the platform-provided filter and
 * channel config, programs a device-bound slave config targeting the hash
 * FIFO (16-bit writes, maxburst 16), and initializes the transfer-done
 * completion used by hash_dma_callback()/hash_dma_final().
 *
 * NOTE(review): the results of dma_request_channel() and
 * dmaengine_slave_config() are not checked here; a NULL channel would only
 * surface later in hash_set_dma_transfer() — confirm this is intended.
 */
static void hash_dma_setup_channel(struct hash_device_data *device_data,
				   struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;
	struct dma_slave_config conf = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst = 16,
	};

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				    platform_data->dma_filter,
				    device_data->dma.cfg_mem2hash);

	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

	init_completion(&device_data->dma.complete);
}
130
131static void hash_dma_callback(void *data)
132{
133 struct hash_ctx *ctx = data;
134
135 complete(&ctx->device->dma.complete);
136}
137
/*
 * hash_set_dma_transfer - Map @sg and queue a mem-to-device DMA transfer.
 * @ctx:       Hash context (must be bound to a device).
 * @sg:        Scatterlist with the message data.
 * @len:       Byte count requested by the caller.
 * @direction: Must be DMA_TO_DEVICE; anything else is rejected.
 *
 * Returns 0 on success, -EFAULT on any failure.
 *
 * NOTE(review): @len is not used below — the mapping is driven by
 * ctx->device->dma.nents, which the caller (hash_dma_final) computed
 * beforehand. Also note the last sg entry's length is rounded up in place
 * to the DMA alignment; presumably the trailing bytes are don't-care for
 * the engine — verify against the hardware spec.
 */
static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
			__func__);
		return -EFAULT;
	}

	/* Round the (last) entry up to the engine's DMA alignment. */
	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
					     ctx->device->dma.sg, ctx->device->dma.nents,
					     direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
		__func__);
	desc = dmaengine_prep_slave_sg(channel,
				       ctx->device->dma.sg, ctx->device->dma.sg_len,
				       DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(ctx->device->dev,
			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
		return -EFAULT;
	}

	/* Completion is signalled from hash_dma_callback(). */
	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;

	dmaengine_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}
183
/*
 * hash_dma_done - Tear down after a DMA transfer: stop any outstanding
 * descriptors on the mem2hash channel and unmap the scatterlist that
 * hash_set_dma_transfer() mapped.
 */
static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	dmaengine_terminate_all(chan);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
}
193
194static int hash_dma_write(struct hash_ctx *ctx,
195 struct scatterlist *sg, int len)
196{
197 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
198 if (error) {
199 dev_dbg(ctx->device->dev,
200 "%s: hash_set_dma_transfer() failed\n", __func__);
201 return error;
202 }
203
204 return len;
205}
206
207
208
209
210
211
212
213
214
215static int get_empty_message_digest(
216 struct hash_device_data *device_data,
217 u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
218{
219 int ret = 0;
220 struct hash_ctx *ctx = device_data->current_ctx;
221 *zero_digest = false;
222
223
224
225
226
227 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
228 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
229 memcpy(zero_hash, &sha1_zero_message_hash[0],
230 SHA1_DIGEST_SIZE);
231 *zero_hash_size = SHA1_DIGEST_SIZE;
232 *zero_digest = true;
233 } else if (HASH_ALGO_SHA256 ==
234 ctx->config.algorithm) {
235 memcpy(zero_hash, &sha256_zero_message_hash[0],
236 SHA256_DIGEST_SIZE);
237 *zero_hash_size = SHA256_DIGEST_SIZE;
238 *zero_digest = true;
239 } else {
240 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
241 __func__);
242 ret = -EINVAL;
243 goto out;
244 }
245 } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
246 if (!ctx->keylen) {
247 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
248 memcpy(zero_hash, &zero_message_hmac_sha1[0],
249 SHA1_DIGEST_SIZE);
250 *zero_hash_size = SHA1_DIGEST_SIZE;
251 *zero_digest = true;
252 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
253 memcpy(zero_hash, &zero_message_hmac_sha256[0],
254 SHA256_DIGEST_SIZE);
255 *zero_hash_size = SHA256_DIGEST_SIZE;
256 *zero_digest = true;
257 } else {
258 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
259 __func__);
260 ret = -EINVAL;
261 goto out;
262 }
263 } else {
264 dev_dbg(device_data->dev,
265 "%s: Continue hash calculation, since hmac key available\n",
266 __func__);
267 }
268 }
269out:
270
271 return ret;
272}
273
274
275
276
277
278
279
280
281
/*
 * hash_disable_power - Gate clock and regulator for the hash block.
 * @device_data:       Hash device.
 * @save_device_state: If true, snapshot the HW state first so it can be
 *                     restored by hash_enable_power().
 *
 * No-op if the block is already powered down. Runs under
 * power_state_lock. Returns 0 or the regulator_disable() error.
 */
static int hash_disable_power(struct hash_device_data *device_data,
			      bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	/* Reverse order of hash_enable_power(): clock first, then supply. */
	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}
310
311
312
313
314
315
316
317
318
319static int hash_enable_power(struct hash_device_data *device_data,
320 bool restore_device_state)
321{
322 int ret = 0;
323 struct device *dev = device_data->dev;
324
325 spin_lock(&device_data->power_state_lock);
326 if (!device_data->power_state) {
327 ret = regulator_enable(device_data->regulator);
328 if (ret) {
329 dev_err(dev, "%s: regulator_enable() failed!\n",
330 __func__);
331 goto out;
332 }
333 ret = clk_enable(device_data->clk);
334 if (ret) {
335 dev_err(dev, "%s: clk_enable() failed!\n", __func__);
336 ret = regulator_disable(
337 device_data->regulator);
338 goto out;
339 }
340 device_data->power_state = true;
341 }
342
343 if (device_data->restore_dev_state) {
344 if (restore_device_state) {
345 device_data->restore_dev_state = false;
346 hash_resume_state(device_data, &device_data->state);
347 }
348 }
349out:
350 spin_unlock(&device_data->power_state_lock);
351
352 return ret;
353}
354
355
356
357
358
359
360
361
362
363
/*
 * hash_get_device_data - Claim a free hash device for @ctx.
 * @ctx:         Context that wants a device.
 * @device_data: Output: the claimed device on success.
 *
 * Blocks (interruptibly) on the driver-wide allocation semaphore until a
 * device should be free, then walks the device klist and claims the first
 * one with no current_ctx, linking ctx <-> device under ctx_lock.
 *
 * Returns 0 on success, -EINTR if the wait was interrupted, or -EBUSY if
 * no free device was found despite the semaphore (should not normally
 * happen, since release_hash_device() ups the semaphore only after
 * clearing current_ctx).
 */
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is free (or the wait is interrupted). */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;

	/* Find and claim the first unused device. */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
						 struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);

		if (local_device_data->current_ctx) {
			/* Busy; try the next device. */
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found even though the semaphore said one
		 * should exist.
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}
413
414
415
416
417
418
419
420
421
422
423
424
/*
 * hash_hw_write_key - Feed the HMAC key into the hash engine's DIN FIFO.
 * @device_data: Hash device.
 * @key:         Key bytes.
 * @keylen:      Key length in bytes.
 *
 * Writes the key one 32-bit word at a time; a trailing partial word
 * (1-3 bytes) is packed little-endian (byte i at bit 8*i). Then kicks a
 * digest-calculation cycle (DCAL) and busy-waits for the engine to go
 * idle before and after.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Whole 32-bit words of the key. */
	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Pack any trailing 1-3 bytes into one word, lowest byte first. */
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	/* Wait until the engine is idle before starting the calculation. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	/* Wait for the key processing to complete. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
460
461
462
463
464
465
466
467
468
469static int init_hash_hw(struct hash_device_data *device_data,
470 struct hash_ctx *ctx)
471{
472 int ret = 0;
473
474 ret = hash_setconfiguration(device_data, &ctx->config);
475 if (ret) {
476 dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
477 __func__);
478 return ret;
479 }
480
481 hash_begin(device_data, ctx);
482
483 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
484 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
485
486 return ret;
487}
488
489
490
491
492
493
494
495
496
497static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
498{
499 int nents = 0;
500 bool aligned_data = true;
501
502 while (size > 0 && sg) {
503 nents++;
504 size -= sg->length;
505
506
507 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
508 (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
509 aligned_data = false;
510
511 sg = sg_next(sg);
512 }
513
514 if (aligned)
515 *aligned = aligned_data;
516
517 if (size != 0)
518 return -EFAULT;
519
520 return nents;
521}
522
523
524
525
526
527
528
529
530
531static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
532{
533 bool aligned;
534
535
536 if (hash_get_nents(sg, datasize, &aligned) < 1)
537 return false;
538
539 return aligned;
540}
541
542
543
544
545
546
547
548static int ux500_hash_init(struct ahash_request *req)
549{
550 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
551 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
552 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
553
554 if (!ctx->key)
555 ctx->keylen = 0;
556
557 memset(&req_ctx->state, 0, sizeof(struct hash_state));
558 req_ctx->updated = 0;
559 if (hash_mode == HASH_MODE_DMA) {
560 if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
561 req_ctx->dma_mode = false;
562
563 pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
564 __func__, HASH_DMA_ALIGN_SIZE);
565 } else {
566 if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
567 hash_dma_valid_data(req->src, req->nbytes)) {
568 req_ctx->dma_mode = true;
569 } else {
570 req_ctx->dma_mode = false;
571 pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
572 __func__,
573 HASH_DMA_PERFORMANCE_MIN_SIZE);
574 }
575 }
576 }
577 return 0;
578}
579
580
581
582
583
584
585
586
587
/*
 * hash_processblock - Push one full message block into the engine.
 * @device_data: Hash device.
 * @message:     Block data, 32-bit word aligned.
 * @length:      Block length in bytes (a multiple of HASH_BYTES_PER_WORD).
 */
static void hash_processblock(struct hash_device_data *device_data,
			      const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;

	/* Clear any leftover "number of valid bits in last word" setting. */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Write the block into the DIN FIFO. */
	HASH_SET_DIN(message, len);
}
602
603
604
605
606
607
608
609
610
611
612
/*
 * hash_messagepad - Push the final partial block and trigger padding plus
 * digest calculation in hardware.
 * @device_data: Hash device.
 * @message:     Buffered tail of the message.
 * @index_bytes: Number of valid bytes in @message (< HASH_BLOCK_SIZE).
 *
 * Whole words are written first; a trailing partial word is written as-is
 * and its valid-bit count (NBLW) is programmed before kicking DCAL so the
 * engine pads correctly.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/* Reset NBLW before writing the data. */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Whole words of the tail. */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	/* Remaining 1-3 bytes go out as one more word. */
	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	/* Engine must be idle before programming NBLW/DCAL. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* Tell the engine how many bits of the last word are valid. */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

	/* Wait for the digest calculation to finish. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
650
651
652
653
654
655
656
657
658
659static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
660{
661 ctx->state.length.low_word += incr;
662
663
664 if (ctx->state.length.low_word < incr)
665 ctx->state.length.high_word++;
666}
667
668
669
670
671
672
673
/*
 * hash_setconfiguration - Program algorithm, data format and operation
 * mode into the engine's control register.
 * @device_data: Hash device.
 * @config:      Requested configuration.
 *
 * Returns 0 on success or -EPERM for an unsupported algorithm/mode.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/* Select how input data is interpreted (8-bit here). */
	HASH_SET_DATA_FORMAT(config->data_format);

	/* Per this code: ALGO bit set = SHA-1, cleared = SHA-256. */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
			__func__);
		return -EPERM;
	}

	/* MODE bit selects plain hash vs HMAC operation. */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Long-key HMAC: tell the HW the key exceeds a block. */
			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {
		ret = -EPERM;
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
	}
	return ret;
}
734
735
736
737
738
739
740
/*
 * hash_begin - Start a fresh digest computation: wait for the engine to go
 * idle, issue the INIT command and clear the NBLW field.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* The engine must not be mid-calculation when we re-init it. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* Reset the digest engine for a new message. */
	HASH_INITIALIZE;

	/* No partial word pending yet. */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}
761
/*
 * hash_process_data - Feed @msg_length bytes of @data_buffer into the
 * engine, buffering partial blocks in the request state.
 * @device_data: Hash device.
 * @ctx:         Transform context.
 * @req_ctx:     Per-request state (buffer, updated flag, length counter).
 * @msg_length:  Bytes available at @data_buffer.
 * @data_buffer: Input data for this walk step.
 * @buffer:      req_ctx->state.buffer (partial-block staging area).
 * @index:       In/out: number of bytes currently staged in @buffer.
 *
 * Data smaller than a block is only staged. Whenever a full block is
 * available, the HW state is resumed (or freshly initialized on first
 * use), the block is processed, and the state is saved again so the
 * device can be shared between updates. Returns 0 or a negative errno.
 */
static int hash_process_data(struct hash_device_data *device_data,
			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
			     int msg_length, u8 *data_buffer, u8 *buffer,
			     u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			/* Less than a block: stage the bytes and stop. */
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				/* Reload HW state saved after the last block. */
				ret = hash_resume_state(device_data,
							&device_data->state);
				/*
				 * NOTE(review): the staging buffer is mirrored
				 * between device state and request state around
				 * resume/save; presumably to keep both views
				 * coherent when devices are shared — verify.
				 */
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE);
				if (ret) {
					dev_err(device_data->dev,
						"%s: hash_resume_state() failed!\n",
						__func__);
					goto out;
				}
			} else {
				/* First block: configure and init the engine. */
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev,
						"%s: init_hash_hw() failed!\n",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * Fast path: feed directly from the walk buffer when it
			 * is word-aligned and nothing is staged; otherwise top
			 * up the staging buffer to a full block first.
			 */
			if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
			    (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			/* Save HW state so the device can be released/shared. */
			ret = hash_save_state(device_data,
					      &device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE);
			if (ret) {
				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}
847
848
849
850
851
852static int hash_dma_final(struct ahash_request *req)
853{
854 int ret = 0;
855 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
856 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
857 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
858 struct hash_device_data *device_data;
859 u8 digest[SHA256_DIGEST_SIZE];
860 int bytes_written = 0;
861
862 ret = hash_get_device_data(ctx, &device_data);
863 if (ret)
864 return ret;
865
866 dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
867 (unsigned long)ctx);
868
869 if (req_ctx->updated) {
870 ret = hash_resume_state(device_data, &device_data->state);
871
872 if (ret) {
873 dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
874 __func__);
875 goto out;
876 }
877 }
878
879 if (!req_ctx->updated) {
880 ret = hash_setconfiguration(device_data, &ctx->config);
881 if (ret) {
882 dev_err(device_data->dev,
883 "%s: hash_setconfiguration() failed!\n",
884 __func__);
885 goto out;
886 }
887
888
889 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
890 HASH_CLEAR_BITS(&device_data->base->cr,
891 HASH_CR_DMAE_MASK);
892 } else {
893 HASH_SET_BITS(&device_data->base->cr,
894 HASH_CR_DMAE_MASK);
895 HASH_SET_BITS(&device_data->base->cr,
896 HASH_CR_PRIVN_MASK);
897 }
898
899 HASH_INITIALIZE;
900
901 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
902 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
903
904
905 HASH_SET_NBLW((req->nbytes * 8) % 32);
906 req_ctx->updated = 1;
907 }
908
909
910 ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
911 if (!ctx->device->dma.nents) {
912 dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
913 __func__);
914 ret = ctx->device->dma.nents;
915 goto out;
916 }
917
918 bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
919 if (bytes_written != req->nbytes) {
920 dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
921 __func__);
922 ret = bytes_written;
923 goto out;
924 }
925
926 wait_for_completion(&ctx->device->dma.complete);
927 hash_dma_done(ctx);
928
929 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
930 cpu_relax();
931
932 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
933 unsigned int keylen = ctx->keylen;
934 u8 *key = ctx->key;
935
936 dev_dbg(device_data->dev, "%s: keylen: %d\n",
937 __func__, ctx->keylen);
938 hash_hw_write_key(device_data, key, keylen);
939 }
940
941 hash_get_digest(device_data, digest, ctx->config.algorithm);
942 memcpy(req->result, digest, ctx->digestsize);
943
944out:
945 release_hash_device(device_data);
946
947
948
949
950 kfree(ctx->key);
951
952 return ret;
953}
954
955
956
957
958
959static int hash_hw_final(struct ahash_request *req)
960{
961 int ret = 0;
962 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
963 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
964 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
965 struct hash_device_data *device_data;
966 u8 digest[SHA256_DIGEST_SIZE];
967
968 ret = hash_get_device_data(ctx, &device_data);
969 if (ret)
970 return ret;
971
972 dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
973 (unsigned long)ctx);
974
975 if (req_ctx->updated) {
976 ret = hash_resume_state(device_data, &device_data->state);
977
978 if (ret) {
979 dev_err(device_data->dev,
980 "%s: hash_resume_state() failed!\n", __func__);
981 goto out;
982 }
983 } else if (req->nbytes == 0 && ctx->keylen == 0) {
984 u8 zero_hash[SHA256_DIGEST_SIZE];
985 u32 zero_hash_size = 0;
986 bool zero_digest = false;
987
988
989
990
991 ret = get_empty_message_digest(device_data, &zero_hash[0],
992 &zero_hash_size, &zero_digest);
993 if (!ret && likely(zero_hash_size == ctx->digestsize) &&
994 zero_digest) {
995 memcpy(req->result, &zero_hash[0], ctx->digestsize);
996 goto out;
997 } else if (!ret && !zero_digest) {
998 dev_dbg(device_data->dev,
999 "%s: HMAC zero msg with key, continue...\n",
1000 __func__);
1001 } else {
1002 dev_err(device_data->dev,
1003 "%s: ret=%d, or wrong digest size? %s\n",
1004 __func__, ret,
1005 zero_hash_size == ctx->digestsize ?
1006 "true" : "false");
1007
1008 goto out;
1009 }
1010 } else if (req->nbytes == 0 && ctx->keylen > 0) {
1011 dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
1012 __func__);
1013 goto out;
1014 }
1015
1016 if (!req_ctx->updated) {
1017 ret = init_hash_hw(device_data, ctx);
1018 if (ret) {
1019 dev_err(device_data->dev,
1020 "%s: init_hash_hw() failed!\n", __func__);
1021 goto out;
1022 }
1023 }
1024
1025 if (req_ctx->state.index) {
1026 hash_messagepad(device_data, req_ctx->state.buffer,
1027 req_ctx->state.index);
1028 } else {
1029 HASH_SET_DCAL;
1030 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1031 cpu_relax();
1032 }
1033
1034 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
1035 unsigned int keylen = ctx->keylen;
1036 u8 *key = ctx->key;
1037
1038 dev_dbg(device_data->dev, "%s: keylen: %d\n",
1039 __func__, ctx->keylen);
1040 hash_hw_write_key(device_data, key, keylen);
1041 }
1042
1043 hash_get_digest(device_data, digest, ctx->config.algorithm);
1044 memcpy(req->result, digest, ctx->digestsize);
1045
1046out:
1047 release_hash_device(device_data);
1048
1049
1050
1051
1052 kfree(ctx->key);
1053
1054 return ret;
1055}
1056
1057
1058
1059
1060
1061
1062
/*
 * hash_hw_update - ahash .update for the CPU path: walk req->src and feed
 * the data through hash_process_data().
 * @req: ahash request.
 *
 * Returns 0 on success, -EPERM on 64-bit message-length overflow, or a
 * negative errno from device allocation / data processing.
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message: nothing to do. */
	if (msg_length == 0)
		return ret;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	/* Reject updates that would overflow the 64-bit length counter. */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
		return -EPERM;
	}

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	/* Feed each walk segment into the engine. */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
				__func__);
			goto out;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
		__func__, req_ctx->state.index, req_ctx->state.bit_index);

out:
	release_hash_device(device_data);

	return ret;
}
1119
1120
1121
1122
1123
1124
1125int hash_resume_state(struct hash_device_data *device_data,
1126 const struct hash_state *device_state)
1127{
1128 u32 temp_cr;
1129 s32 count;
1130 int hash_mode = HASH_OPER_MODE_HASH;
1131
1132 if (NULL == device_state) {
1133 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1134 __func__);
1135 return -EPERM;
1136 }
1137
1138
1139 if (device_state->index > HASH_BLOCK_SIZE ||
1140 (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1141 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1142 __func__);
1143 return -EPERM;
1144 }
1145
1146
1147
1148
1149
1150
1151 HASH_INITIALIZE;
1152
1153 temp_cr = device_state->temp_cr;
1154 writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1155
1156 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1157 hash_mode = HASH_OPER_MODE_HMAC;
1158 else
1159 hash_mode = HASH_OPER_MODE_HASH;
1160
1161 for (count = 0; count < HASH_CSR_COUNT; count++) {
1162 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1163 break;
1164
1165 writel_relaxed(device_state->csr[count],
1166 &device_data->base->csrx[count]);
1167 }
1168
1169 writel_relaxed(device_state->csfull, &device_data->base->csfull);
1170 writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
1171
1172 writel_relaxed(device_state->str_reg, &device_data->base->str);
1173 writel_relaxed(temp_cr, &device_data->base->cr);
1174
1175 return 0;
1176}
1177
1178
1179
1180
1181
1182
1183int hash_save_state(struct hash_device_data *device_data,
1184 struct hash_state *device_state)
1185{
1186 u32 temp_cr;
1187 u32 count;
1188 int hash_mode = HASH_OPER_MODE_HASH;
1189
1190 if (NULL == device_state) {
1191 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1192 __func__);
1193 return -ENOTSUPP;
1194 }
1195
1196
1197
1198
1199
1200 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1201 cpu_relax();
1202
1203 temp_cr = readl_relaxed(&device_data->base->cr);
1204
1205 device_state->str_reg = readl_relaxed(&device_data->base->str);
1206
1207 device_state->din_reg = readl_relaxed(&device_data->base->din);
1208
1209 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1210 hash_mode = HASH_OPER_MODE_HMAC;
1211 else
1212 hash_mode = HASH_OPER_MODE_HASH;
1213
1214 for (count = 0; count < HASH_CSR_COUNT; count++) {
1215 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1216 break;
1217
1218 device_state->csr[count] =
1219 readl_relaxed(&device_data->base->csrx[count]);
1220 }
1221
1222 device_state->csfull = readl_relaxed(&device_data->base->csfull);
1223 device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
1224
1225 device_state->temp_cr = temp_cr;
1226
1227 return 0;
1228}
1229
1230
1231
1232
1233
1234
1235int hash_check_hw(struct hash_device_data *device_data)
1236{
1237
1238 if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
1239 HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
1240 HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
1241 HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
1242 HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
1243 HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
1244 HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
1245 HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
1246 return 0;
1247 }
1248
1249 dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
1250 return -ENOTSUPP;
1251}
1252
1253
1254
1255
1256
1257
1258
1259void hash_get_digest(struct hash_device_data *device_data,
1260 u8 *digest, int algorithm)
1261{
1262 u32 temp_hx_val, count;
1263 int loop_ctr;
1264
1265 if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1266 dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
1267 __func__, algorithm);
1268 return;
1269 }
1270
1271 if (algorithm == HASH_ALGO_SHA1)
1272 loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
1273 else
1274 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1275
1276 dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
1277 __func__, (unsigned long)digest);
1278
1279
1280 for (count = 0; count < loop_ctr; count++) {
1281 temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
1282 digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
1283 digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
1284 digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
1285 digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
1286 }
1287}
1288
1289
1290
1291
1292
1293static int ahash_update(struct ahash_request *req)
1294{
1295 int ret = 0;
1296 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1297
1298 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
1299 ret = hash_hw_update(req);
1300
1301
1302 if (ret) {
1303 pr_err("%s: hash_hw_update() failed!\n", __func__);
1304 }
1305
1306 return ret;
1307}
1308
1309
1310
1311
1312
1313static int ahash_final(struct ahash_request *req)
1314{
1315 int ret = 0;
1316 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1317
1318 pr_debug("%s: data size: %d\n", __func__, req->nbytes);
1319
1320 if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1321 ret = hash_dma_final(req);
1322 else
1323 ret = hash_hw_final(req);
1324
1325 if (ret) {
1326 pr_err("%s: hash_hw/dma_final() failed\n", __func__);
1327 }
1328
1329 return ret;
1330}
1331
/*
 * hash_setkey - Common HMAC setkey: keep a private copy of the key, the
 * hardware needs it again when the digest is finalized.
 * @tfm:    The ahash transform.
 * @key:    Key bytes to copy.
 * @keylen: Key length in bytes.
 * @alg:    Algorithm id, used only for the error message.
 *
 * Returns 0 or -ENOMEM.
 *
 * NOTE(review): a previously set ctx->key is not freed here, and final()
 * kfree()s ctx->key — the key lifecycle looks leak/dangle-prone across
 * repeated setkey/final calls; verify against the ahash call sequence.
 */
static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err("%s: Failed to allocate ctx->key for %d\n",
		       __func__, alg);
		return -ENOMEM;
	}
	ctx->keylen = keylen;

	return ret;
}
1351
1352static int ahash_sha1_init(struct ahash_request *req)
1353{
1354 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1355 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1356
1357 ctx->config.data_format = HASH_DATA_8_BITS;
1358 ctx->config.algorithm = HASH_ALGO_SHA1;
1359 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1360 ctx->digestsize = SHA1_DIGEST_SIZE;
1361
1362 return ux500_hash_init(req);
1363}
1364
1365static int ahash_sha256_init(struct ahash_request *req)
1366{
1367 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1368 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1369
1370 ctx->config.data_format = HASH_DATA_8_BITS;
1371 ctx->config.algorithm = HASH_ALGO_SHA256;
1372 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1373 ctx->digestsize = SHA256_DIGEST_SIZE;
1374
1375 return ux500_hash_init(req);
1376}
1377
/* One-shot SHA-1 digest: init + update + final. */
static int ahash_sha1_digest(struct ahash_request *req)
{
	int update_ret, final_ret;

	update_ret = ahash_sha1_init(req);
	if (update_ret)
		return update_ret;

	/* final() always runs so the device/key state is cleaned up. */
	update_ret = ahash_update(req);
	final_ret = ahash_final(req);

	return update_ret ? update_ret : final_ret;
}
1392
/* One-shot SHA-256 digest: init + update + final. */
static int ahash_sha256_digest(struct ahash_request *req)
{
	int update_ret, final_ret;

	update_ret = ahash_sha256_init(req);
	if (update_ret)
		return update_ret;

	/* final() always runs so the device/key state is cleaned up. */
	update_ret = ahash_update(req);
	final_ret = ahash_final(req);

	return update_ret ? update_ret : final_ret;
}
1407
/* Partial-state import is not supported by this driver. */
static int ahash_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}
1412
/* Partial-state export is not supported by this driver. */
static int ahash_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}
1417
1418static int hmac_sha1_init(struct ahash_request *req)
1419{
1420 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1421 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1422
1423 ctx->config.data_format = HASH_DATA_8_BITS;
1424 ctx->config.algorithm = HASH_ALGO_SHA1;
1425 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1426 ctx->digestsize = SHA1_DIGEST_SIZE;
1427
1428 return ux500_hash_init(req);
1429}
1430
1431static int hmac_sha256_init(struct ahash_request *req)
1432{
1433 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1434 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1435
1436 ctx->config.data_format = HASH_DATA_8_BITS;
1437 ctx->config.algorithm = HASH_ALGO_SHA256;
1438 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1439 ctx->digestsize = SHA256_DIGEST_SIZE;
1440
1441 return ux500_hash_init(req);
1442}
1443
/*
 * One-shot HMAC-SHA1 digest: init, then update and final. Final is invoked
 * even if update fails; update's error takes precedence.
 */
static int hmac_sha1_digest(struct ahash_request *req)
{
	int upd_ret, fin_ret;
	int ret;

	ret = hmac_sha1_init(req);
	if (ret)
		return ret;

	upd_ret = ahash_update(req);
	fin_ret = ahash_final(req);

	return upd_ret ? upd_ret : fin_ret;
}
1458
/*
 * One-shot HMAC-SHA256 digest: init, then update and final. Final is
 * invoked even if update fails; update's error takes precedence.
 */
static int hmac_sha256_digest(struct ahash_request *req)
{
	int upd_ret, fin_ret;
	int ret;

	ret = hmac_sha256_init(req);
	if (ret)
		return ret;

	upd_ret = ahash_update(req);
	fin_ret = ahash_final(req);

	return upd_ret ? upd_ret : fin_ret;
}
1473
/* setkey entry point for hmac(sha1): delegate to the common helper. */
static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}
1479
/* setkey entry point for hmac(sha256): delegate to the common helper. */
static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
1485
/**
 * struct hash_algo_template - per-algorithm bundle for this driver.
 * @conf: hardware configuration (algorithm and operating mode) applied to
 *        the context in hash_cra_init().
 * @hash: the ahash_alg descriptor registered with the crypto core.
 */
struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};
1490
1491static int hash_cra_init(struct crypto_tfm *tfm)
1492{
1493 struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
1494 struct crypto_alg *alg = tfm->__crt_alg;
1495 struct hash_algo_template *hash_alg;
1496
1497 hash_alg = container_of(__crypto_ahash_alg(alg),
1498 struct hash_algo_template,
1499 hash);
1500
1501 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1502 sizeof(struct hash_req_ctx));
1503
1504 ctx->config.data_format = HASH_DATA_8_BITS;
1505 ctx->config.algorithm = hash_alg->conf.algorithm;
1506 ctx->config.oper_mode = hash_alg->conf.oper_mode;
1507
1508 ctx->digestsize = hash_alg->hash.halg.digestsize;
1509
1510 return 0;
1511}
1512
/*
 * The four algorithms exposed by this driver: SHA-1, SHA-256 and their
 * HMAC variants. All are asynchronous hardware implementations; import/
 * export of partial state is not supported (see ahash_noexport/noimport).
 */
static struct hash_algo_template hash_algs[] = {
	/* sha1 */
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* sha256 */
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* hmac(sha1) */
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* hmac(sha256) */
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
1609
1610
1611
1612
1613static int ahash_algs_register_all(struct hash_device_data *device_data)
1614{
1615 int ret;
1616 int i;
1617 int count;
1618
1619 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
1620 ret = crypto_register_ahash(&hash_algs[i].hash);
1621 if (ret) {
1622 count = i;
1623 dev_err(device_data->dev, "%s: alg registration failed\n",
1624 hash_algs[i].hash.halg.base.cra_driver_name);
1625 goto unreg;
1626 }
1627 }
1628 return 0;
1629unreg:
1630 for (i = 0; i < count; i++)
1631 crypto_unregister_ahash(&hash_algs[i].hash);
1632 return ret;
1633}
1634
1635
1636
1637
1638static void ahash_algs_unregister_all(struct hash_device_data *device_data)
1639{
1640 int i;
1641
1642 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
1643 crypto_unregister_ahash(&hash_algs[i].hash);
1644}
1645
1646
1647
1648
1649
1650static int ux500_hash_probe(struct platform_device *pdev)
1651{
1652 int ret = 0;
1653 struct resource *res = NULL;
1654 struct hash_device_data *device_data;
1655 struct device *dev = &pdev->dev;
1656
1657 device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1658 if (!device_data) {
1659 ret = -ENOMEM;
1660 goto out;
1661 }
1662
1663 device_data->dev = dev;
1664 device_data->current_ctx = NULL;
1665
1666 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1667 if (!res) {
1668 dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
1669 ret = -ENODEV;
1670 goto out;
1671 }
1672
1673 device_data->phybase = res->start;
1674 device_data->base = devm_ioremap_resource(dev, res);
1675 if (IS_ERR(device_data->base)) {
1676 dev_err(dev, "%s: ioremap() failed!\n", __func__);
1677 ret = PTR_ERR(device_data->base);
1678 goto out;
1679 }
1680 spin_lock_init(&device_data->ctx_lock);
1681 spin_lock_init(&device_data->power_state_lock);
1682
1683
1684 device_data->regulator = regulator_get(dev, "v-ape");
1685 if (IS_ERR(device_data->regulator)) {
1686 dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1687 ret = PTR_ERR(device_data->regulator);
1688 device_data->regulator = NULL;
1689 goto out;
1690 }
1691
1692
1693 device_data->clk = devm_clk_get(dev, NULL);
1694 if (IS_ERR(device_data->clk)) {
1695 dev_err(dev, "%s: clk_get() failed!\n", __func__);
1696 ret = PTR_ERR(device_data->clk);
1697 goto out_regulator;
1698 }
1699
1700 ret = clk_prepare(device_data->clk);
1701 if (ret) {
1702 dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1703 goto out_regulator;
1704 }
1705
1706
1707 ret = hash_enable_power(device_data, false);
1708 if (ret) {
1709 dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1710 goto out_clk_unprepare;
1711 }
1712
1713 ret = hash_check_hw(device_data);
1714 if (ret) {
1715 dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
1716 goto out_power;
1717 }
1718
1719 if (hash_mode == HASH_MODE_DMA)
1720 hash_dma_setup_channel(device_data, dev);
1721
1722 platform_set_drvdata(pdev, device_data);
1723
1724
1725 klist_add_tail(&device_data->list_node, &driver_data.device_list);
1726
1727 up(&driver_data.device_allocation);
1728
1729 ret = ahash_algs_register_all(device_data);
1730 if (ret) {
1731 dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
1732 __func__);
1733 goto out_power;
1734 }
1735
1736 dev_info(dev, "successfully registered\n");
1737 return 0;
1738
1739out_power:
1740 hash_disable_power(device_data, false);
1741
1742out_clk_unprepare:
1743 clk_unprepare(device_data->clk);
1744
1745out_regulator:
1746 regulator_put(device_data->regulator);
1747
1748out:
1749 return ret;
1750}
1751
1752
1753
1754
1755
/**
 * ux500_hash_remove - Detach the driver from a ux500 hash device.
 * @pdev: The platform device.
 *
 * Refuses removal (-EBUSY) while a context is using the device. On the
 * success path the allocation semaphore is deliberately left held, so no
 * new user can claim the device being torn down.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* Try to claim the device; if it is in use, bail out. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free (no context mid-operation). */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated. */
	if (device_data->current_ctx) {
		/* The device is busy. */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list. */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services. */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n",
			__func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->regulator);

	return 0;
}
1801
1802
1803
1804
1805
/**
 * ux500_hash_shutdown - Shut down the hash device unconditionally.
 * @pdev: The platform device.
 *
 * Unlike remove, this proceeds even if the device is busy; it is called
 * on system shutdown where best effort is acceptable.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free (no context mid-operation). */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated. */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device)
		 * Need to set this to non-NULL (dummy) value, since
		 * usage is tracked by pointer value: incrementing the
		 * NULL pointer yields a sentinel ((hash_ctx *)0 + 1)
		 * that marks the device as occupied.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list. */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services. */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}
1845
1846#ifdef CONFIG_PM_SLEEP
1847
1848
1849
1850
/**
 * ux500_hash_suspend - Power down the device for system suspend.
 * @dev: The device.
 *
 * If the device was idle it is claimed by setting current_ctx to the
 * sentinel value ((struct hash_ctx *)0 + 1), so resume can tell "parked
 * by suspend" apart from "in use by a real context".
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* If the device is free, mark it occupied with the sentinel. */
	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	/* ++temp_ctx reproduces the same sentinel for comparison. */
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);

	} else {
		/* A real context owns the device: save hardware state too. */
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power()\n", __func__);

	return ret;
}
1883
1884
1885
1886
1887
/**
 * ux500_hash_resume - Power the device back up after system suspend.
 * @dev: The device.
 *
 * Mirrors ux500_hash_suspend(): if current_ctx holds the suspend sentinel
 * ((struct hash_ctx *)0 + 1) the device was idle — release it back to the
 * pool; otherwise a real context owns it and its state is restored.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* If parked by suspend (sentinel match), mark it free again. */
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
1915#endif
1916
/* System sleep PM ops; the callbacks are compiled in only under CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

/* Devicetree match table for the ux500 hash block. */
static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);

static struct platform_driver hash_driver = {
	.probe  = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name  = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};
1935
1936
1937
1938
1939static int __init ux500_hash_mod_init(void)
1940{
1941 klist_init(&driver_data.device_list, NULL, NULL);
1942
1943 sema_init(&driver_data.device_allocation, 0);
1944
1945 return platform_driver_register(&hash_driver);
1946}
1947
1948
1949
1950
/* Module exit: unregister the platform driver (per-device teardown in remove). */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}
1955
module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

/* "-all" aliases — presumably kept for legacy module autoloading; verify. */
MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");
1966