// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for IBM Power 842 compression accelerator
 *
 * Copyright (C) IBM Corporation
 *
 * Author: Robert Jennings <rcj@linux.vnet.ibm.com>
 */
#include <asm/vio.h>

#include "nx-842.h"
#include "nx_csbcpb.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");

static struct nx842_constraints nx842_pseries_constraints = {
	.alignment =	DDE_BUFFER_ALIGN,
	.multiple =	DDE_BUFFER_LAST_MULT,
	.minimum =	DDE_BUFFER_LAST_MULT,
	.maximum =	PAGE_SIZE, /* dynamic, set to max_sync_size at probe */
};

static int check_constraints(unsigned long buf, unsigned int *len, bool in)
{
	if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
		pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
			 in ? "input" : "output", buf,
			 nx842_pseries_constraints.alignment);
		return -EINVAL;
	}
	if (*len % nx842_pseries_constraints.multiple) {
		pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.multiple);
		if (in)
			return -EINVAL;
		*len = round_down(*len, nx842_pseries_constraints.multiple);
	}
	if (*len < nx842_pseries_constraints.minimum) {
		pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.minimum);
		return -EINVAL;
	}
	if (*len > nx842_pseries_constraints.maximum) {
		pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.maximum);
		if (in)
			return -EINVAL;
		*len = nx842_pseries_constraints.maximum;
	}
	return 0;
}

/* Alignment applied to the coprocessor work area below */
#define WORKMEM_ALIGN	(256)

struct nx842_workmem {
	/* scatterlists for the input and output buffers */
	char slin[4096];
	char slout[4096];
	/* coprocessor status/parameter block */
	struct nx_csbcpb csbcpb;

	char padding[WORKMEM_ALIGN];
} __aligned(WORKMEM_ALIGN);

/* Check the valid bit within the csbcpb valid field */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/*
 * Completion-extension bit tests.  Only CE2 is checked in this driver;
 * it indicates that processed_byte_count refers to the target buffer.
 */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* The NX unit accepts data only on 4K page boundaries */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))

struct ibm_nx842_counters {
	atomic64_t comp_complete;
	atomic64_t comp_failed;
	atomic64_t decomp_complete;
	atomic64_t decomp_failed;
	atomic64_t swdecomp;
	atomic64_t comp_times[32];
	atomic64_t decomp_times[32];
};

static struct nx842_devdata {
	struct vio_dev *vdev;
	struct device *dev;
	struct ibm_nx842_counters *counters;
	unsigned int max_sg_len;
	unsigned int max_sync_size;
	unsigned int max_sync_sg;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);

#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);

#define NX842_HIST_SLOTS 16

/* Bucket operation times (in microseconds) into a power-of-two histogram */
static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
	int bucket = fls(time);

	if (bucket)
		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

	atomic64_inc(&times[bucket]);
}

/* Coprocessor operation flags passed via vio_pfo_op.flags */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC	(NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC	(NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)
#define NX842_OP_NOTIFY		(1<<22)
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)

static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}

/* pHyp scatterlist entry */
struct nx842_slentry {
	__be64 ptr; /* Real (physical) address */
	__be64 len;
};

/* pHyp scatterlist, built from one or more entries */
struct nx842_scatterlist {
	int entry_nr; /* number of slentries */
	struct nx842_slentry *entries; /* ptr to array of slentries */
};

/* Does not include sizeof(entry_nr) in the size */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}

static int nx842_build_scatterlist(unsigned long buf, int len,
				   struct nx842_scatterlist *sl)
{
	unsigned long entrylen;
	struct nx842_slentry *entry;

	sl->entry_nr = 0;

	entry = sl->entries;
	while (len) {
		entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
		entrylen = min_t(int, len,
				 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
		entry->len = cpu_to_be64(entrylen);

		len -= entrylen;
		buf += entrylen;

		sl->entry_nr++;
		entry++;
	}

	return 0;
}

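/**
 * nx842_validate_result - Validate the CSB returned by the hardware
 * @dev: struct device used for error reporting
 * @csb: the cop_status_block returned from the hardware call
 *
 * Returns:
 *   0       - success
 *   -ENOSPC - the output buffer was too small
 *   -EINVAL - CRC mismatch or bad input data for decompression
 *   -EIO    - fatal or unspecified hardware error
 */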
static int nx842_validate_result(struct device *dev,
				 struct cop_status_block *csb)
{
	/* The csb must be valid after returning from vio_h_cop_sync */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: cspcbp not valid upon completion.\n",
			__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
			csb->valid,
			csb->crb_seq_number,
			csb->completion_code,
			csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
			be32_to_cpu(csb->processed_byte_count),
			(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Check return values from the hardware in the CSB */
	switch (csb->completion_code) {
	case 0:	/* Completed without error */
		break;
	case 64: /* Compression ok, but output larger than input */
		dev_dbg(dev, "%s: output size larger than input size\n",
			__func__);
		break;
	case 13: /* Output buffer too small */
		dev_dbg(dev, "%s: Out of space in output buffer\n",
			__func__);
		return -ENOSPC;
	case 65: /* CRC mismatch during decompression */
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
			__func__);
		return -EINVAL;
	case 66: /* Bad data for decompression */
	case 67:
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
			__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
			__func__, csb->completion_code);
		return -EIO;
	}

	/* Hardware sanity check */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
			"data returned is unusable, contact support.\n"
			"(Additional info: csbcbp->processed bytes "
			"does not specify processed bytes for the "
			"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}

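/**
 * nx842_pseries_compress - Compress data using the 842 coprocessor
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlen: on entry the output buffer size; on success, the number of
 *	    compressed bytes written to @out
 * @wmem: working memory buffer pointer, size determined by
 *	  nx842_pseries_driver.workmem_size
 *
 * Returns: 0 on success, otherwise a negative error code
 *   -EINVAL - buffer constraints do not permit the operation
 *   -ENODEV - the hardware device is not available
 *   -ENOSPC - the output buffer is too small
 *   -EIO    - internal or hardware error
 */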
static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int *outlen,
				  void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	/* Init scatterlist */
	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_COMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		ret = -EIO;
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);

unlock:
	if (ret)
		nx842_inc_comp_failed(local_devdata);
	else {
		nx842_inc_comp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
				    (get_tb() - start) / tb_ticks_per_usec);
	}
	rcu_read_unlock();
	return ret;
}

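/**
 * nx842_pseries_decompress - Decompress data using the 842 coprocessor
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlen: on entry the output buffer size; on success, the number of
 *	    decompressed bytes written to @out
 * @wmem: working memory buffer pointer, size determined by
 *	  nx842_pseries_driver.workmem_size
 *
 * Returns: 0 on success, otherwise a negative error code
 *   -EINVAL - buffer constraints do not permit the operation, or the
 *	       compressed data is invalid
 *   -ENODEV - the hardware device is not available
 *   -ENOSPC - the output buffer is too small
 *   -EIO    - internal or hardware error
 */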
static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
				    unsigned char *out, unsigned int *outlen,
				    void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	/* Ensure page alignment and size */
	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);

	/* Init scatterlist */
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_DECOMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		ret = -EIO;
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);

unlock:
	if (ret)
		/* decompress fail */
		nx842_inc_decomp_failed(local_devdata);
	else {
		nx842_inc_decomp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
				    (get_tb() - start) / tb_ticks_per_usec);
	}

	rcu_read_unlock();
	return ret;
}

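/**
 * nx842_OF_set_defaults - Set default (disabled) values in the devdata
 * @devdata: struct nx842_devdata to update
 *
 * Returns: 0 on success, -ENOENT if @devdata is NULL
 */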
static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
{
	if (devdata) {
		devdata->max_sync_size = 0;
		devdata->max_sync_sg = 0;
		devdata->max_sg_len = 0;
		return 0;
	} else
		return -ENOENT;
}

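/**
 * nx842_OF_upd_status - Check the device-tree "status" property
 * @prop: the "status" property read from the device tree
 *
 * Returns:
 *   0       - the device is available ("okay")
 *   -ENODEV - the device is disabled
 *   -EINVAL - the status value is not recognized
 */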
static int nx842_OF_upd_status(struct property *prop)
{
	const char *status = (const char *)prop->value;

	if (!strncmp(status, "okay", (size_t)prop->length))
		return 0;
	if (!strncmp(status, "disabled", (size_t)prop->length))
		return -ENODEV;
	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);

	return -EINVAL;
}

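/**
 * nx842_OF_upd_maxsglen - Update the device data from "ibm,max-sg-len"
 * @devdata: struct nx842_devdata to update
 * @prop: the "ibm,max-sg-len" property, a single 32-bit cell giving the
 *	  maximum byte length of a scatter list for the platform facility
 *
 * Returns: 0 on success, -EINVAL if the property has an unexpected format
 */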
static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
				 struct property *prop) {
	int ret = 0;
	const unsigned int maxsglen = of_read_number(prop->value, 1);

	if (prop->length != sizeof(maxsglen)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
			prop->length, sizeof(maxsglen));
		ret = -EINVAL;
	} else {
		devdata->max_sg_len = min_t(unsigned int,
					    maxsglen, NX842_HW_PAGE_SIZE);
	}

	return ret;
}

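/**
 * nx842_OF_upd_maxsyncop - Update the device data from "ibm,max-sync-cop"
 * @devdata: struct nx842_devdata to update
 * @prop: the "ibm,max-sync-cop" property, two series of cells giving the
 *	  maximum data length and scatter list elements that can be handled
 *	  synchronously, first for compression and then for decompression
 *
 * Returns: 0 on success, -EINVAL if the property has an unexpected format
 *	    or the reported limits are below the driver minimums
 */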
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
				  struct property *prop) {
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	const struct maxsynccop_t {
		__be32 comp_elements;
		__be32 comp_data_limit;
		__be32 comp_sg_limit;
		__be32 decomp_elements;
		__be32 decomp_data_limit;
		__be32 decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
			sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/* Use one limit rather than separate limits for compression and
	 * decompression, so the order of operations does not matter.
	 * Cap the limit at 64K.
	 */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
				       65536);

	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
			"less than the driver minimum, unable to use "
			"the hardware device\n",
			__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
			"less than the driver minimum, unable to use "
			"the hardware device\n",
			__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}

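/**
 * nx842_OF_upd - Re-read the hardware configuration from the device tree
 * @new_prop: the property being updated, or NULL to re-read all properties
 *
 * A new devdata structure is built from the current device-tree properties,
 * published via RCU, and the old structure is freed.
 *
 * Returns:
 *   0       - success
 *   -ENOMEM - could not allocate a new devdata structure
 *   -ENODEV - the device is not available
 *   -EINVAL - a required property is missing or invalid; the device is
 *	       disabled by resetting the devdata to defaults
 */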
static int nx842_OF_upd(struct property *new_prop)
{
	struct nx842_devdata *old_devdata = NULL;
	struct nx842_devdata *new_devdata = NULL;
	struct device_node *of_node = NULL;
	struct property *status = NULL;
	struct property *maxsglen = NULL;
	struct property *maxsyncop = NULL;
	int ret = 0;
	unsigned long flags;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	if (old_devdata)
		of_node = old_devdata->dev->of_node;

	if (!old_devdata || !of_node) {
		pr_err("%s: device is not available\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		kfree(new_devdata);
		return -ENODEV;
	}

	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
	new_devdata->counters = old_devdata->counters;

	/* Set ptrs for existing properties */
	status = of_find_property(of_node, "status", NULL);
	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
	if (!status || !maxsglen || !maxsyncop) {
		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
		ret = -EINVAL;
		goto error_out;
	}

	/*
	 * If this is a property update, there are only certain properties that
	 * we care about.  Bail if it isn't in the list below.
	 */
	if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
			 strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
			 strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
		goto out;

	/* Perform property updates */
	ret = nx842_OF_upd_status(status);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
	if (ret)
		goto error_out;

out:
	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
		 __func__, new_devdata->max_sync_size,
		 old_devdata->max_sync_size);
	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
		 __func__, new_devdata->max_sync_sg,
		 old_devdata->max_sync_sg);
	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
		 __func__, new_devdata->max_sg_len,
		 old_devdata->max_sg_len);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(new_devdata->dev, new_devdata);
	kfree(old_devdata);
	return 0;

error_out:
	if (new_devdata) {
		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
		nx842_OF_set_defaults(new_devdata);
		rcu_assign_pointer(devdata, new_devdata);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		synchronize_rcu();
		dev_set_drvdata(new_devdata->dev, new_devdata);
		kfree(old_devdata);
	} else {
		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
	}

	if (!ret)
		ret = -EINVAL;
	return ret;
}

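/**
 * nx842_OF_notifier - Process device-tree property updates for the device
 * @np: notifier block
 * @action: notifier action
 * @data: struct of_reconfig_data pointer
 *
 * Returns: NOTIFY_OK
 */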
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *data)
{
	struct of_reconfig_data *upd = data;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	if (local_devdata &&
	    action == OF_RECONFIG_UPDATE_PROPERTY &&
	    !strcmp(upd->dn->name, node->name)) {
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};

#define nx842_counter_read(_name) \
static ssize_t nx842_##_name##_show(struct device *dev, \
		struct device_attribute *attr, \
		char *buf) { \
	struct nx842_devdata *local_devdata; \
	int p = 0; \
	rcu_read_lock(); \
	local_devdata = rcu_dereference(devdata); \
	if (local_devdata) \
		p = snprintf(buf, PAGE_SIZE, "%lld\n", \
			atomic64_read(&local_devdata->counters->_name)); \
	rcu_read_unlock(); \
	return p; \
}
#define NX842DEV_COUNTER_ATTR_RO(_name) \
	nx842_counter_read(_name); \
	static struct device_attribute dev_attr_##_name = __ATTR(_name, \
						0444, \
						nx842_##_name##_show, \
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

static ssize_t nx842_timehist_show(struct device *,
				   struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
						nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
						0444, nx842_timehist_show, NULL);

static ssize_t nx842_timehist_show(struct device *dev,
				   struct device_attribute *attr, char *buf) {
	char *p = buf;
	struct nx842_devdata *local_devdata;
	atomic64_t *times;
	int bytes_remain = PAGE_SIZE;
	int bytes;
	int i;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata) {
		rcu_read_unlock();
		return 0;
	}

	if (attr == &dev_attr_comp_times)
		times = local_devdata->counters->comp_times;
	else if (attr == &dev_attr_decomp_times)
		times = local_devdata->counters->decomp_times;
	else {
		rcu_read_unlock();
		return 0;
	}

	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
				 i ? (2<<(i-1)) : 0, (2<<i)-1,
				 atomic64_read(&times[i]));
		bytes_remain -= bytes;
		p += bytes;
	}

	/* The last bucket holds everything over
	 * 2<<(NX842_HIST_SLOTS - 2) us */
	bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
			 2<<(NX842_HIST_SLOTS - 2),
			 atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
	p += bytes;

	rcu_read_unlock();
	return p - buf;
}
static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static struct attribute_group nx842_attribute_group = {
	.name = NULL,
	.attrs = nx842_sysfs_entries,
};

static struct nx842_driver nx842_pseries_driver = {
	.name =		KBUILD_MODNAME,
	.owner =	THIS_MODULE,
	.workmem_size =	sizeof(struct nx842_workmem),
	.constraints =	&nx842_pseries_constraints,
	.compress =	nx842_pseries_compress,
	.decompress =	nx842_pseries_decompress,
};

static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_pseries_driver);
}

static struct crypto_alg nx842_pseries_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_pseries_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
	.coa_compress		= nx842_crypto_compress,
	.coa_decompress		= nx842_crypto_decompress } }
};

static int nx842_probe(struct vio_dev *viodev,
		       const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
					GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	return ret;
}

static int nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);

	return 0;
}

static const struct vio_device_id nx842_vio_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};

static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};

static int __init nx842_pseries_init(void)
{
	struct nx842_devdata *new_devdata;
	int ret;

	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
		return -ENODEV;

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata)
		return -ENOMEM;

	RCU_INIT_POINTER(devdata, new_devdata);

	ret = vio_register_driver(&nx842_vio_driver);
	if (ret) {
		pr_err("Could not register VIO driver %d\n", ret);

		kfree(new_devdata);
		return ret;
	}

	return 0;
}

module_init(nx842_pseries_init);

static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_vio_driver);
}

module_exit(nx842_pseries_exit);