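/*
 * Common routines for the IBM Power in-Nest (NX) crypto accelerator driver.
 * The accelerator is reached through the pSeries hypervisor's H_COP_OP
 * interface over the vio bus.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
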
#include <crypto/internal/hash.h>
#include <crypto/hash.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
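
/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */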
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
                  struct vio_pfo_op *op,
                  u32 may_sleep)
{
        int rc, retries = 10;
        struct vio_dev *viodev = nx_driver.viodev;

        atomic_inc(&(nx_ctx->stats->sync_ops));

        do {
                rc = vio_h_cop_sync(viodev, op);
        } while (rc == -EBUSY && !may_sleep && retries--);

        if (rc) {
                dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
                        "hcall rc: %ld\n", rc, op->hcall_err);
                atomic_inc(&(nx_ctx->stats->errors));
                atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
                atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
        }

        return rc;
}
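
/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or until
 * @sgmax elements have been written. Scatter list elements will be created
 * such that none of the elements describes a buffer that crosses a 4K
 * boundary. On return, @len is updated to the number of bytes described.
 */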
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
                               u8 *start_addr,
                               unsigned int *len,
                               u32 sgmax)
{
        unsigned int sg_len = 0;
        struct nx_sg *sg;
        u64 sg_addr = (u64)start_addr;
        u64 end_addr;

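        /* determine the start and end for this address range - slightly
         * different if this is in the vmalloc region */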
        if (is_vmalloc_addr(start_addr))
                sg_addr = page_to_phys(vmalloc_to_page(start_addr))
                          + offset_in_page(sg_addr);
        else
                sg_addr = __pa(sg_addr);

        end_addr = sg_addr + *len;

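        /* each iteration writes one struct nx_sg element and adds the length
         * of data described by that element to sg_len. Once @len bytes have
         * been described (or @sgmax elements have been written), the loop
         * ends. min_t is used to ensure @end_addr falls on the same page as
         * sg->addr; if not, we need to create another nx_sg element for the
         * data on the next page.
         *
         * Also, when using vmalloc'ed data, every time a system page boundary
         * is crossed the physical address needs to be re-calculated.
         */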
        for (sg = sg_head; sg_len < *len; sg++) {
                u64 next_page;

                sg->addr = sg_addr;
                sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
                                end_addr);

                next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
                sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
                sg_len += sg->len;

                if (sg_addr >= next_page &&
                    is_vmalloc_addr(start_addr + sg_len)) {
                        sg_addr = page_to_phys(vmalloc_to_page(
                                                start_addr + sg_len));
                        end_addr = sg_addr + *len - sg_len;
                }

                if ((sg - sg_head) == sgmax) {
                        pr_err("nx: scatter/gather list overflow, pid: %d\n",
                               current->pid);
                        sg++;
                        break;
                }
        }
        *len = sg_len;
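
        /* return the moved sg_head pointer */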
        return sg;
}
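
/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast forward before starting to write
 * @src_len: number of bytes to walk in @sg_src; updated on return to the
 *           number of bytes actually described
 */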
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
                                unsigned int sglen,
                                struct scatterlist *sg_src,
                                unsigned int start,
                                unsigned int *src_len)
{
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_dst;
        unsigned int n, offset = 0, len = *src_len;
        char *dst;

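        /* we need to fast forward through @start bytes first */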
        for (;;) {
                scatterwalk_start(&walk, sg_src);

                if (start < offset + sg_src->length)
                        break;

                offset += sg_src->length;
                sg_src = scatterwalk_sg_next(sg_src);
        }
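
        /* start - offset is the number of bytes to advance in the
         * scatterlist element we're currently looking at */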
        scatterwalk_advance(&walk, start - offset);

        while (len && (nx_sg - nx_dst) < sglen) {
                n = scatterwalk_clamp(&walk, len);
                if (!n) {
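                        /* a zero clamp means we're at the end of this
                         * scatterlist element; scatterwalk_sg_next() moves
                         * the walk on to the next (possibly chained) one */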
                        scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                dst = scatterwalk_map(&walk);

                nx_sg = nx_build_sg_list(nx_sg, dst, &n,
                                         sglen - (nx_sg - nx_dst));
                len -= n;

                scatterwalk_unmap(dst);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
        }

        *src_len -= len;
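
        /* return the moved destination pointer */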
        return nx_sg;
}
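
/**
 * trim_sg_list - crop an sg list so it describes @delta fewer bytes
 *
 * @sg: sg list head
 * @end: sg list end
 * @delta: number of bytes to crop in order to bound the list
 * @nbytes: length of data in the scatterlists; reduced by the number of
 *          bytes the caller must re-submit in a later operation
 */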
static long int trim_sg_list(struct nx_sg *sg,
                             struct nx_sg *end,
                             unsigned int delta,
                             unsigned int *nbytes)
{
        long int oplen;
        long int data_back;
        unsigned int is_delta = delta;

        while (delta && end > sg) {
                struct nx_sg *last = end - 1;

                if (last->len > delta) {
                        last->len -= delta;
                        delta = 0;
                } else {
                        end--;
                        delta -= last->len;
                }
        }

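        /* There are cases where we need to crop the list in order to make it
         * a block size multiple, but we also need to align data. In order to
         * do that we need to calculate how much data we need to put back to
         * be processed. Note that oplen is deliberately negative: a negative
         * length tells the hypervisor that the parameter is a scatter/gather
         * list rather than a linear buffer.
         */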
        oplen = (sg - end) * sizeof(struct nx_sg);
        if (is_delta) {
                data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
                data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
                *nbytes -= data_back;
        }

        return oplen;
}
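
/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of the
 *          scatterlists
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the crypto
 * scatterlist walk routines to traverse input and output scatterlists,
 * building corresponding NX scatterlists.
 */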
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
                      struct blkcipher_desc *desc,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int *nbytes,
                      unsigned int offset,
                      u8 *iv)
{
        unsigned int delta = 0;
        unsigned int total = *nbytes;
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
        unsigned int max_sg_len;

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len / sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen / NX_PAGE_SIZE);

        if (iv)
                memcpy(iv, desc->info, AES_BLOCK_SIZE);

        *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

        nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
                                     offset, nbytes);
        nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
                                    offset, nbytes);

        if (*nbytes < total)
                delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
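
        /* these lengths should be negative, which will indicate to phyp
         * that the input and output parameters are scatterlists, not
         * linear buffers */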
        nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
        nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta,
                                         nbytes);

        return 0;
}
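
/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */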
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
        spin_lock_init(&nx_ctx->lock);
        memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
        nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

        nx_ctx->op.flags = function;
        nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
        nx_ctx->op.in = __pa(nx_ctx->in_sg);
        nx_ctx->op.out = __pa(nx_ctx->out_sg);

        if (nx_ctx->csbcpb_aead) {
                nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

                nx_ctx->op_aead.flags = function;
                nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
                nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
                nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
        }
}

static void nx_of_update_status(struct device *dev,
                                struct property *p,
                                struct nx_of *props)
{
        if (!strncmp(p->value, "okay", p->length)) {
                props->status = NX_WAITING;
                props->flags |= NX_OF_FLAG_STATUS_SET;
        } else {
                dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
                         (char *)p->value);
        }
}

static void nx_of_update_sglen(struct device *dev,
                               struct property *p,
                               struct nx_of *props)
{
        if (p->length != sizeof(props->max_sg_len)) {
                dev_err(dev, "%s: unexpected format for "
                        "ibm,max-sg-len property\n", __func__);
                dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
                        "long, expected %zd bytes\n", __func__,
                        p->length, sizeof(props->max_sg_len));
                return;
        }

        props->max_sg_len = *(u32 *)p->value;
        props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device *dev,
                             struct property *p,
                             struct nx_of *props)
{
        struct msc_triplet *trip;
        struct max_sync_cop *msc;
        unsigned int bytes_so_far, i, lenp;

        msc = (struct max_sync_cop *)p->value;
        lenp = p->length;
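
        /* You can't tell if the data read in for this property is sane by
         * its size alone. This is because there are sizes embedded in the
         * data structure. The best we can do is check lengths as we parse
         * and bail as soon as a length error is detected. */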
        bytes_so_far = 0;

        while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
                bytes_so_far += sizeof(struct max_sync_cop);

                trip = msc->trip;

                for (i = 0;
                     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
                     i < msc->triplets;
                     i++) {
                        if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
                                dev_err(dev, "unknown function code/mode "
                                        "combo: %d/%d (ignored)\n", msc->fc,
                                        msc->mode);
                                goto next_loop;
                        }

                        if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
                                dev_warn(dev, "bogus sglen/databytelen: "
                                         "%u/%u (ignored)\n", trip->sglen,
                                         trip->databytelen);
                                goto next_loop;
                        }

                        switch (trip->keybitlen) {
                        case 128:
                        case 160:
                                props->ap[msc->fc][msc->mode][0].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][0].sglen =
                                        trip->sglen;
                                break;
                        case 192:
                                props->ap[msc->fc][msc->mode][1].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][1].sglen =
                                        trip->sglen;
                                break;
                        case 256:
                                if (msc->fc == NX_FC_AES) {
                                        props->ap[msc->fc][msc->mode][2].
                                                databytelen = trip->databytelen;
                                        props->ap[msc->fc][msc->mode][2].sglen =
                                                trip->sglen;
                                } else if (msc->fc == NX_FC_AES_HMAC ||
                                           msc->fc == NX_FC_SHA) {
                                        props->ap[msc->fc][msc->mode][1].
                                                databytelen = trip->databytelen;
                                        props->ap[msc->fc][msc->mode][1].sglen =
                                                trip->sglen;
                                } else {
                                        dev_warn(dev, "unknown function "
                                                 "code/key bit len combo"
                                                 ": (%u/256)\n", msc->fc);
                                }
                                break;
                        case 512:
                                props->ap[msc->fc][msc->mode][2].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][2].sglen =
                                        trip->sglen;
                                break;
                        default:
                                dev_warn(dev, "unknown function code/key bit "
                                         "len combo: (%u/%u)\n", msc->fc,
                                         trip->keybitlen);
                                break;
                        }
next_loop:
                        bytes_so_far += sizeof(struct msc_triplet);
                        trip++;
                }

                msc = (struct max_sync_cop *)trip;
        }

        props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}
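
/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */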
static void nx_of_init(struct device *dev, struct nx_of *props)
{
        struct device_node *base_node = dev->of_node;
        struct property *p;

        p = of_find_property(base_node, "status", NULL);
        if (!p)
                dev_info(dev, "%s: property 'status' not found\n", __func__);
        else
                nx_of_update_status(dev, p, props);

        p = of_find_property(base_node, "ibm,max-sg-len", NULL);
        if (!p)
                dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
                         __func__);
        else
                nx_of_update_sglen(dev, p, props);

        p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
        if (!p)
                dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
                         __func__);
        else
                nx_of_update_msc(dev, p, props);
}

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
        struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

        if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
                if (dev)
                        dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
                                 "%u/%u (ignored)\n", fc, mode, slot,
                                 props->sglen, props->databytelen);
                return false;
        }

        return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
        int i;

        for (i = 0; i < 3; i++)
                if (!nx_check_prop(dev, fc, mode, i))
                        return false;

        return true;
}

static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
        return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
               crypto_register_alg(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
        return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
                                          fc, mode, slot) :
                            nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
               crypto_register_shash(alg) : 0;
}

static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
        if (nx_check_props(NULL, fc, mode))
                crypto_unregister_alg(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
                                int slot)
{
        if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
                        nx_check_props(NULL, fc, mode))
                crypto_unregister_shash(alg);
}
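
/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */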
static int nx_register_algs(void)
{
        int rc = -1;

        if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
                goto out;

        memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

        rc = NX_DEBUGFS_INIT(&nx_driver);
        if (rc)
                goto out;

        nx_driver.of.status = NX_OKAY;

        rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
        if (rc)
                goto out;

        rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
        if (rc)
                goto out_unreg_ecb;

        rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
        if (rc)
                goto out_unreg_cbc;

        rc = nx_register_alg(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
        if (rc)
                goto out_unreg_ctr3686;

        rc = nx_register_alg(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
        if (rc)
                goto out_unreg_gcm;

        rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
        if (rc)
                goto out_unreg_gcm4106;

        rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
        if (rc)
                goto out_unreg_ccm;

        rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
                               NX_PROPS_SHA256);
        if (rc)
                goto out_unreg_ccm4309;

        rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
                               NX_PROPS_SHA512);
        if (rc)
                goto out_unreg_s256;

        rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
                               NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
        if (rc)
                goto out_unreg_s512;

        goto out;

out_unreg_s512:
        nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
                            NX_PROPS_SHA512);
out_unreg_s256:
        nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
                            NX_PROPS_SHA256);
out_unreg_ccm4309:
        nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
        nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
        nx_unregister_alg(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
        nx_unregister_alg(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
        nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
        nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
        nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
        return rc;
}
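
/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */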
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
        if (nx_driver.of.status != NX_OKAY) {
                pr_err("Attempt to initialize NX crypto context while device "
                       "is not available!\n");
                return -ENODEV;
        }
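
        /* we need an extra page for csbcpb_aead for these modes */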
        if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
                nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
                                   sizeof(struct nx_csbcpb);
        else
                nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
                                   sizeof(struct nx_csbcpb);

        nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
        if (!nx_ctx->kmem)
                return -ENOMEM;
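
        /* the csbcpb and scatterlists must be 4K aligned pages */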
        nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
                                                       (u64)NX_PAGE_SIZE));
        nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
        nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

        if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
                nx_ctx->csbcpb_aead =
                        (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
                                             NX_PAGE_SIZE);
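
        /* give each context a pointer to the global stats and its OF
         * properties */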
        nx_ctx->stats = &nx_driver.stats;
        memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
               sizeof(struct alg_props) * 3);

        return 0;
}
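
/* entry points from the crypto tfm initializers */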
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
{
        tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
{
        tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_XCBC_MAC);
}
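
/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */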
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

        kzfree(nx_ctx->kmem);
        nx_ctx->csbcpb = NULL;
        nx_ctx->csbcpb_aead = NULL;
        nx_ctx->in_sg = NULL;
        nx_ctx->out_sg = NULL;
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
        dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
                viodev->name, viodev->resource_id);

        if (nx_driver.viodev) {
                dev_err(&viodev->dev, "%s: Attempt to register more than one "
                        "instance of the hardware\n", __func__);
                return -EINVAL;
        }

        nx_driver.viodev = viodev;

        nx_of_init(&viodev->dev, &nx_driver.of);

        return nx_register_algs();
}

static int nx_remove(struct vio_dev *viodev)
{
        dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
                viodev->unit_address);

        if (nx_driver.of.status == NX_OKAY) {
                NX_DEBUGFS_FINI(&nx_driver);

                nx_unregister_shash(&nx_shash_aes_xcbc_alg,
                                    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
                nx_unregister_shash(&nx_shash_sha512_alg,
                                    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
                nx_unregister_shash(&nx_shash_sha256_alg,
                                    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
                nx_unregister_alg(&nx_ccm4309_aes_alg,
                                  NX_FC_AES, NX_MODE_AES_CCM);
                nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
                nx_unregister_alg(&nx_gcm4106_aes_alg,
                                  NX_FC_AES, NX_MODE_AES_GCM);
                nx_unregister_alg(&nx_gcm_aes_alg,
                                  NX_FC_AES, NX_MODE_AES_GCM);
                nx_unregister_alg(&nx_ctr3686_aes_alg,
                                  NX_FC_AES, NX_MODE_AES_CTR);
                nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
                nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
        }

        return 0;
}

static int __init nx_init(void)
{
        return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
        vio_unregister_driver(&nx_driver.viodriver);
}

static struct vio_device_id nx_crypto_driver_ids[] = {
        { "ibm,sym-encryption-v1", "ibm,sym-encryption" },
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);
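
/* global driver state: the vio driver and device, device-tree properties
 * and the statistics shared by every context */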
struct nx_crypto_driver nx_driver = {
        .viodriver = {
                .id_table = nx_crypto_driver_ids,
                .probe = nx_probe,
                .remove = nx_remove,
                .name = NX_NAME,
        },
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);
842