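/*
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
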
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
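
/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */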
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op *op,
		  u32 may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}
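
/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until @sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a
 * 4K boundary.
 */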
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8 *start_addr,
			       unsigned int *len,
			       u32 sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different than other sg_lists, since we're parsing a vmalloc'd
	 * address */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + *len;

	/* each iteration of this loop writes one struct nx_sg element and
	 * adds the length of data described by that element to sg_len. Once
	 * @len bytes have been described (or @sgmax elements have been
	 * written), the loop ends. min_t is used to ensure @end_addr falls
	 * on the same page as sg_addr; if not, we create another nx_sg
	 * element for the data on the next page.
	 *
	 * Also, when using vmalloc'ed data, every time a system page
	 * boundary is crossed the physical address needs to be re-calculated.
	 */
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			sg++;
			break;
		}
	}
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}
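
/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast forward past before starting to write
 * @src_len: number of bytes to walk in @sg_src
 */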
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
				unsigned int sglen,
				struct scatterlist *sg_src,
				unsigned int start,
				unsigned int *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = *src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* In cases where we have a scatterlist chain,
			 * sg_next() handles it properly */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}
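
/**
 * trim_sg_list - ensure the bound in an sg list is not exceeded
 * @sg: sg list head
 * @end: sg list end
 * @delta: the amount we need to crop in order to bound the list
 * @nbytes: length of data in the scatterlists or data length - whichever
 *          is greater
 */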
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list in order to make it
	 * a block size multiple, but we also need to align the data. In order
	 * to do that we need to calculate how much data needs to be put back
	 * to be processed.
	 */
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}
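
/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          the scatterlists
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the crypto
 * scatterlist walk routines to traverse input and output scatterlists,
 * building corresponding NX scatterlists.
 */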
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int *nbytes,
		      unsigned int offset,
		      u8 *iv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	if (iv)
		memcpy(iv, desc->info, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
				     offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
				    offset, nbytes);

	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}
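
/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */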
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}

static void nx_of_update_status(struct device *dev,
				struct property *p,
				struct nx_of *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}

static void nx_of_update_sglen(struct device *dev,
			       struct property *p,
			       struct nx_of *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device *dev,
			     struct property *p,
			     struct nx_of *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
	bytes_so_far = 0;

	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;

		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
				dev_warn(dev, "bogus sglen/databytelen: "
					 "%u/%u (ignored)\n", trip->sglen,
					 trip->databytelen);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						 "code/key bit len combo"
						 ": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					 "len combo: (%u/%u)\n", msc->fc,
					 trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}

		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}
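
/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the property values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */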
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
		if (dev)
			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
				 "%u/%u (ignored)\n", fc, mode, slot,
				 props->sglen, props->databytelen);
		return false;
	}

	return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (!nx_check_prop(dev, fc, mode, i))
			return false;

	return true;
}

static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_alg(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_aead(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
					  fc, mode, slot) :
			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
	       crypto_register_shash(alg) : 0;
}

static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_alg(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
				int slot)
{
	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
			nx_check_props(NULL, fc, mode))
		crypto_unregister_shash(alg);
}
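
/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * the algorithms we support with the crypto API.
 */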
static int nx_register_algs(void)
{
	int rc = -1;

	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	rc = NX_DEBUGFS_INIT(&nx_driver);
	if (rc)
		goto out;

	nx_driver.of.status = NX_OKAY;

	rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	if (rc)
		goto out;

	rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
	if (rc)
		goto out_unreg_ecb;

	rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
	if (rc)
		goto out_unreg_cbc;

	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_ctr3686;

	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_gcm;

	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_gcm4106;

	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_ccm;

	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA256);
	if (rc)
		goto out_unreg_ccm4309;

	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA512);
	if (rc)
		goto out_unreg_s256;

	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA512);
out_unreg_s256:
	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA256);
out_unreg_ccm4309:
	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
	nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
	nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
	nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
	return rc;
}
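
/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */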
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}

/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}
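
/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */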
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kzfree(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

	kzfree(nx_ctx->kmem);
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}

static int nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
		nx_unregister_shash(&nx_shash_sha512_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
		nx_unregister_shash(&nx_shash_sha256_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
		nx_unregister_aead(&nx_ccm4309_aes_alg,
				   NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_gcm4106_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_aead(&nx_gcm_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_alg(&nx_ctr3686_aes_alg,
				  NX_FC_AES, NX_MODE_AES_CTR);
		nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
		nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	}

	return 0;
}

/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);