1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <asm/vio.h>
25
26#include "nx-842.h"
27#include "nx_csbcpb.h"
28
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
31MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
32MODULE_ALIAS_CRYPTO("842");
33MODULE_ALIAS_CRYPTO("842-nx");
34
/*
 * Buffer constraints imposed by the pseries NX hardware (DDE = data
 * descriptor entry).  .maximum starts at PAGE_SIZE and is raised from
 * the device tree in nx842_OF_upd_maxsyncop().
 */
static struct nx842_constraints nx842_pseries_constraints = {
	.alignment = DDE_BUFFER_ALIGN,
	.multiple = DDE_BUFFER_LAST_MULT,
	.minimum = DDE_BUFFER_LAST_MULT,
	.maximum = PAGE_SIZE,
};
41
42static int check_constraints(unsigned long buf, unsigned int *len, bool in)
43{
44 if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
45 pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
46 in ? "input" : "output", buf,
47 nx842_pseries_constraints.alignment);
48 return -EINVAL;
49 }
50 if (*len % nx842_pseries_constraints.multiple) {
51 pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
52 in ? "input" : "output", *len,
53 nx842_pseries_constraints.multiple);
54 if (in)
55 return -EINVAL;
56 *len = round_down(*len, nx842_pseries_constraints.multiple);
57 }
58 if (*len < nx842_pseries_constraints.minimum) {
59 pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
60 in ? "input" : "output", *len,
61 nx842_pseries_constraints.minimum);
62 return -EINVAL;
63 }
64 if (*len > nx842_pseries_constraints.maximum) {
65 pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
66 in ? "input" : "output", *len,
67 nx842_pseries_constraints.maximum);
68 if (in)
69 return -EINVAL;
70 *len = nx842_pseries_constraints.maximum;
71 }
72 return 0;
73}
74
75
/* Alignment the hardware requires for the scratch work area. */
#define WORKMEM_ALIGN (256)

/*
 * Per-request scratch memory, carved out of the caller-supplied wmem
 * buffer after PTR_ALIGN(..., WORKMEM_ALIGN):
 *  - slin/slout back the hardware scatterlist entries for buffers that
 *    cross a hardware page boundary;
 *  - csbcpb is the coprocessor status/parameter block;
 *  - the trailing padding guarantees the aligned struct still fits
 *    inside the raw (unaligned) allocation.
 */
struct nx842_workmem {
	/* scatterlist entry storage (input and output) */
	char slin[4096];
	char slout[4096];
	/* coprocessor status/parameter block */
	struct nx_csbcpb csbcpb;

	char padding[WORKMEM_ALIGN];
} __aligned(WORKMEM_ALIGN);
87
88
89
/* CSB valid bit: set by the coprocessor once the status block is written. */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/* Completion-extension flag bits reported in the CSB. */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* The NX coprocessor operates on 4K pages regardless of kernel PAGE_SIZE. */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))
103
/* Driver statistics, exported through sysfs (see nx842_sysfs_entries). */
struct ibm_nx842_counters {
	atomic64_t comp_complete;	/* successful compressions */
	atomic64_t comp_failed;		/* failed compressions */
	atomic64_t decomp_complete;	/* successful decompressions */
	atomic64_t decomp_failed;	/* failed decompressions */
	atomic64_t swdecomp;		/* software-fallback decompressions */
	atomic64_t comp_times[32];	/* latency histogram buckets (us) */
	atomic64_t decomp_times[32];	/* latency histogram buckets (us) */
};
113
/*
 * Driver-global device state, published via RCU.  Readers take
 * rcu_read_lock() and rcu_dereference(devdata); writers replace the
 * whole structure under devdata_mutex and free the old copy after
 * synchronize_rcu().
 */
static struct nx842_devdata {
	struct vio_dev *vdev;		/* VIO device handle */
	struct device *dev;		/* generic device, for logging/sysfs */
	struct ibm_nx842_counters *counters; /* shared across replacements */
	unsigned int max_sg_len;	/* max scatterlist bytes (from DT) */
	unsigned int max_sync_size;	/* max data bytes per sync op (from DT) */
	unsigned int max_sync_sg;	/* max sg entries per sync op (from DT) */
} __rcu *devdata;
/* Serializes writers of the devdata pointer (a spinlock despite the name). */
static DEFINE_SPINLOCK(devdata_mutex);
123
/*
 * Generate tiny helpers nx842_inc_<counter>() that bump one of the
 * ibm_nx842_counters fields, tolerating a NULL devdata pointer.
 */
#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);
135
136#define NX842_HIST_SLOTS 16
137
138static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
139{
140 int bucket = fls(time);
141
142 if (bucket)
143 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
144
145 atomic64_inc(×[bucket]);
146}
147
148
/* Operation flags for vio_pfo_op.flags (hypervisor coprocessor call). */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)
#define NX842_OP_NOTIFY		(1<<22)
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)
157
/*
 * vio_driver->get_desired_dma callback.
 * NOTE(review): returns 0, i.e. no DMA window entitlement is requested;
 * presumably the driver relies entirely on the synchronous hypervisor
 * copy interface (vio_h_cop_sync) rather than mapped DMA -- confirm
 * against the VIO bus documentation.
 */
static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	return 0;
}
163
/* One hardware scatterlist entry: big-endian real address + length. */
struct nx842_slentry {
	__be64 ptr;	/* real (physical) address, via nx842_get_pa() */
	__be64 len;	/* length of this segment in bytes */
};

/* Scatterlist passed to the hypervisor: a counted array of slentries. */
struct nx842_scatterlist {
	int entry_nr;			/* number of entries */
	struct nx842_slentry *entries;	/* ptr to array of slentries */
};

/* Size in bytes of the entry array only (entry_nr is not included). */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}
181
182static int nx842_build_scatterlist(unsigned long buf, int len,
183 struct nx842_scatterlist *sl)
184{
185 unsigned long entrylen;
186 struct nx842_slentry *entry;
187
188 sl->entry_nr = 0;
189
190 entry = sl->entries;
191 while (len) {
192 entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
193 entrylen = min_t(int, len,
194 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
195 entry->len = cpu_to_be64(entrylen);
196
197 len -= entrylen;
198 buf += entrylen;
199
200 sl->entry_nr++;
201 entry++;
202 }
203
204 return 0;
205}
206
/**
 * nx842_validate_result - check the status block the hardware wrote back
 * @dev: device, for error reporting
 * @csb: coprocessor status block to validate
 *
 * Returns 0 on clean completion, -ENOSPC when the output buffer could
 * not hold the result, -EINVAL for bad input data or CRC mismatch, or
 * -EIO for any other failure.
 */
static int nx842_validate_result(struct device *dev,
			struct cop_status_block *csb)
{
	/* The hardware must have set the valid bit before we trust the CSB. */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: cspcbp not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				be32_to_cpu(csb->processed_byte_count),
				(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Map the hardware completion code to an errno. */
	switch (csb->completion_code) {
	case 0: /* completed without error */
		break;
	case 64: /* compressed output larger than the input */
	case 13: /* output buffer too small */
		dev_dbg(dev, "%s: Compression output larger than input\n",
					__func__);
		return -ENOSPC;
	case 65: /* CRC check failed on decompression */
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
					__func__);
		return -EINVAL;
	case 66: /* input data malformed */
	case 67:
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* CE2 must be set or the processed_byte_count is not meaningful. */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcbp->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
288 unsigned char *out, unsigned int *outlen,
289 void *wmem)
290{
291 struct nx842_devdata *local_devdata;
292 struct device *dev = NULL;
293 struct nx842_workmem *workmem;
294 struct nx842_scatterlist slin, slout;
295 struct nx_csbcpb *csbcpb;
296 int ret = 0, max_sync_size;
297 unsigned long inbuf, outbuf;
298 struct vio_pfo_op op = {
299 .done = NULL,
300 .handle = 0,
301 .timeout = 0,
302 };
303 unsigned long start = get_tb();
304
305 inbuf = (unsigned long)in;
306 if (check_constraints(inbuf, &inlen, true))
307 return -EINVAL;
308
309 outbuf = (unsigned long)out;
310 if (check_constraints(outbuf, outlen, false))
311 return -EINVAL;
312
313 rcu_read_lock();
314 local_devdata = rcu_dereference(devdata);
315 if (!local_devdata || !local_devdata->dev) {
316 rcu_read_unlock();
317 return -ENODEV;
318 }
319 max_sync_size = local_devdata->max_sync_size;
320 dev = local_devdata->dev;
321
322
323 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
324 slin.entries = (struct nx842_slentry *)workmem->slin;
325 slout.entries = (struct nx842_slentry *)workmem->slout;
326
327
328 op.flags = NX842_OP_COMPRESS_CRC;
329 csbcpb = &workmem->csbcpb;
330 memset(csbcpb, 0, sizeof(*csbcpb));
331 op.csbcpb = nx842_get_pa(csbcpb);
332
333 if ((inbuf & NX842_HW_PAGE_MASK) ==
334 ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
335
336 op.in = nx842_get_pa((void *)inbuf);
337 op.inlen = inlen;
338 } else {
339
340 nx842_build_scatterlist(inbuf, inlen, &slin);
341 op.in = nx842_get_pa(slin.entries);
342 op.inlen = -nx842_get_scatterlist_size(&slin);
343 }
344
345 if ((outbuf & NX842_HW_PAGE_MASK) ==
346 ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
347
348 op.out = nx842_get_pa((void *)outbuf);
349 op.outlen = *outlen;
350 } else {
351
352 nx842_build_scatterlist(outbuf, *outlen, &slout);
353 op.out = nx842_get_pa(slout.entries);
354 op.outlen = -nx842_get_scatterlist_size(&slout);
355 }
356
357 dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
358 __func__, (unsigned long)op.in, (long)op.inlen,
359 (unsigned long)op.out, (long)op.outlen);
360
361
362 ret = vio_h_cop_sync(local_devdata->vdev, &op);
363
364
365 if (ret) {
366 dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
367 __func__, ret, op.hcall_err);
368 ret = -EIO;
369 goto unlock;
370 }
371
372
373 ret = nx842_validate_result(dev, &csbcpb->csb);
374 if (ret)
375 goto unlock;
376
377 *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
378 dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);
379
380unlock:
381 if (ret)
382 nx842_inc_comp_failed(local_devdata);
383 else {
384 nx842_inc_comp_complete(local_devdata);
385 ibm_nx842_incr_hist(local_devdata->counters->comp_times,
386 (get_tb() - start) / tb_ticks_per_usec);
387 }
388 rcu_read_unlock();
389 return ret;
390}
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
419 unsigned char *out, unsigned int *outlen,
420 void *wmem)
421{
422 struct nx842_devdata *local_devdata;
423 struct device *dev = NULL;
424 struct nx842_workmem *workmem;
425 struct nx842_scatterlist slin, slout;
426 struct nx_csbcpb *csbcpb;
427 int ret = 0, max_sync_size;
428 unsigned long inbuf, outbuf;
429 struct vio_pfo_op op = {
430 .done = NULL,
431 .handle = 0,
432 .timeout = 0,
433 };
434 unsigned long start = get_tb();
435
436
437 inbuf = (unsigned long)in;
438 if (check_constraints(inbuf, &inlen, true))
439 return -EINVAL;
440
441 outbuf = (unsigned long)out;
442 if (check_constraints(outbuf, outlen, false))
443 return -EINVAL;
444
445 rcu_read_lock();
446 local_devdata = rcu_dereference(devdata);
447 if (!local_devdata || !local_devdata->dev) {
448 rcu_read_unlock();
449 return -ENODEV;
450 }
451 max_sync_size = local_devdata->max_sync_size;
452 dev = local_devdata->dev;
453
454 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
455
456
457 slin.entries = (struct nx842_slentry *)workmem->slin;
458 slout.entries = (struct nx842_slentry *)workmem->slout;
459
460
461 op.flags = NX842_OP_DECOMPRESS_CRC;
462 csbcpb = &workmem->csbcpb;
463 memset(csbcpb, 0, sizeof(*csbcpb));
464 op.csbcpb = nx842_get_pa(csbcpb);
465
466 if ((inbuf & NX842_HW_PAGE_MASK) ==
467 ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
468
469 op.in = nx842_get_pa((void *)inbuf);
470 op.inlen = inlen;
471 } else {
472
473 nx842_build_scatterlist(inbuf, inlen, &slin);
474 op.in = nx842_get_pa(slin.entries);
475 op.inlen = -nx842_get_scatterlist_size(&slin);
476 }
477
478 if ((outbuf & NX842_HW_PAGE_MASK) ==
479 ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
480
481 op.out = nx842_get_pa((void *)outbuf);
482 op.outlen = *outlen;
483 } else {
484
485 nx842_build_scatterlist(outbuf, *outlen, &slout);
486 op.out = nx842_get_pa(slout.entries);
487 op.outlen = -nx842_get_scatterlist_size(&slout);
488 }
489
490 dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
491 __func__, (unsigned long)op.in, (long)op.inlen,
492 (unsigned long)op.out, (long)op.outlen);
493
494
495 ret = vio_h_cop_sync(local_devdata->vdev, &op);
496
497
498 if (ret) {
499 dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
500 __func__, ret, op.hcall_err);
501 goto unlock;
502 }
503
504
505 ret = nx842_validate_result(dev, &csbcpb->csb);
506 if (ret)
507 goto unlock;
508
509 *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
510
511unlock:
512 if (ret)
513
514 nx842_inc_decomp_failed(local_devdata);
515 else {
516 nx842_inc_decomp_complete(local_devdata);
517 ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
518 (get_tb() - start) / tb_ticks_per_usec);
519 }
520
521 rcu_read_unlock();
522 return ret;
523}
524
525
526
527
528
529
530
531
532
533
534static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
535{
536 if (devdata) {
537 devdata->max_sync_size = 0;
538 devdata->max_sync_sg = 0;
539 devdata->max_sg_len = 0;
540 return 0;
541 } else
542 return -ENOENT;
543}
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
/**
 * nx842_OF_upd_status - interpret the device-tree "status" property
 * @prop: the property to check
 *
 * Returns 0 when the status is "okay", -ENODEV when "disabled", and
 * -EINVAL for any other value.
 *
 * NOTE(review): the dev_info() below dereferences the global __rcu
 * devdata pointer directly, without rcu_dereference()/lockdep
 * annotation; callers hold devdata_mutex, but this should be verified
 * (sparse will flag it).
 */
static int nx842_OF_upd_status(struct property *prop)
{
	const char *status = (const char *)prop->value;

	if (!strncmp(status, "okay", (size_t)prop->length))
		return 0;
	if (!strncmp(status, "disabled", (size_t)prop->length))
		return -ENODEV;
	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);

	return -EINVAL;
}
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
594 struct property *prop) {
595 int ret = 0;
596 const unsigned int maxsglen = of_read_number(prop->value, 1);
597
598 if (prop->length != sizeof(maxsglen)) {
599 dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
600 dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
601 prop->length, sizeof(maxsglen));
602 ret = -EINVAL;
603 } else {
604 devdata->max_sg_len = min_t(unsigned int,
605 maxsglen, NX842_HW_PAGE_SIZE);
606 }
607
608 return ret;
609}
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
/**
 * nx842_OF_upd_maxsyncop - parse the "ibm,max-sync-cop" DT property
 * @devdata: device data to update
 * @prop: the property read from the device tree
 *
 * The property carries separate element/data/scatterlist limits for
 * compression and decompression.  The driver keeps a single
 * conservative limit per category: the minimum of the two directions.
 * Also updates nx842_pseries_constraints.maximum.
 * Returns 0, or -EINVAL when the property is malformed or the limits
 * are unusably small.
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	/* On-the-wire layout of the property: six big-endian cells. */
	const struct maxsynccop_t {
		__be32 comp_elements;		/* max DDE elements, compress */
		__be32 comp_data_limit;		/* max data bytes, compress */
		__be32 comp_sg_limit;		/* max sg bytes, compress */
		__be32 decomp_elements;		/* max DDE elements, decompress */
		__be32 decomp_data_limit;	/* max data bytes, decompress */
		__be32 decomp_sg_limit;		/* max sg bytes, decompress */
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
				sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/*
	 * Use one limit for both directions: the smaller of the
	 * compression and decompression values.
	 */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	/* Cap at 64K regardless of what the hardware advertises. */
	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
					65536);

	/* The driver needs at least one 4K page per operation. */
	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	/* Propagate the usable maximum to the crypto-layer constraints. */
	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
/**
 * nx842_OF_upd - rebuild devdata from current device-tree properties
 * @new_prop: the property just changed by an OF reconfig notification,
 *	      or NULL to (re)read everything from the device tree
 *
 * Copy-update-replace scheme: allocate a new devdata, copy the old one,
 * fold in the "status", "ibm,max-sg-len" and "ibm,max-sync-cop"
 * properties, then publish the new structure with rcu_assign_pointer()
 * and free the old one after synchronize_rcu().  On a property error
 * the device is effectively disabled by publishing a defaults-only
 * devdata.  Returns 0 or a negative errno.
 */
static int nx842_OF_upd(struct property *new_prop)
{
	struct nx842_devdata *old_devdata = NULL;
	struct nx842_devdata *new_devdata = NULL;
	struct device_node *of_node = NULL;
	struct property *status = NULL;
	struct property *maxsglen = NULL;
	struct property *maxsyncop = NULL;
	int ret = 0;
	unsigned long flags;

	/* NOTE(review): GFP_NOFS presumably for the reconfig-notifier path -- confirm */
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	if (old_devdata)
		of_node = old_devdata->dev->of_node;

	if (!old_devdata || !of_node) {
		pr_err("%s: device is not available\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		kfree(new_devdata);
		return -ENODEV;
	}

	/* Start from a copy of the current state; counters are shared. */
	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
	new_devdata->counters = old_devdata->counters;

	/* Set ptrs for existing properties */
	status = of_find_property(of_node, "status", NULL);
	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
	if (!status || !maxsglen || !maxsyncop) {
		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
		ret = -EINVAL;
		goto error_out;
	}

	/* If a property was just updated, prefer the in-flight value. */
	if (new_prop) {
		/* Single property */
		if (!strncmp(new_prop->name, "status", new_prop->length)) {
			status = new_prop;

		} else if (!strncmp(new_prop->name, "ibm,max-sg-len",
					new_prop->length)) {
			maxsglen = new_prop;

		} else if (!strncmp(new_prop->name, "ibm,max-sync-cop",
					new_prop->length)) {
			maxsyncop = new_prop;

		} else {
			/* Skip entirely if the property isn't one we track. */
			goto out;
		}
	}

	/* Parse each property into the new devdata. */
	ret = nx842_OF_upd_status(status);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
	if (ret)
		goto error_out;

out:
	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
			__func__, new_devdata->max_sync_size,
			old_devdata->max_sync_size);
	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
			__func__, new_devdata->max_sync_sg,
			old_devdata->max_sync_sg);
	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
			__func__, new_devdata->max_sg_len,
			old_devdata->max_sg_len);

	/* Publish the new devdata; free the old one after a grace period. */
	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(new_devdata->dev, new_devdata);
	kfree(old_devdata);
	return 0;

error_out:
	if (new_devdata) {
		/* Device disabled: publish a defaults-only devdata. */
		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
		nx842_OF_set_defaults(new_devdata);
		rcu_assign_pointer(devdata, new_devdata);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		synchronize_rcu();
		dev_set_drvdata(new_devdata->dev, new_devdata);
		kfree(old_devdata);
	} else {
		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
	}

	if (!ret)
		ret = -EINVAL;
	return ret;
}
835
836
837
838
839
840
841
842
843
844
845
846
847
848
/*
 * OF reconfig notifier: when a property on our device node is updated
 * at runtime (e.g. by a pHyp migration), re-parse the device tree via
 * nx842_OF_upd().
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
		void *update)
{
	struct of_prop_reconfig *upd = update;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		/* Drop the RCU lock first: nx842_OF_upd() may sleep
		 * (allocation, synchronize_rcu()). */
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};
875
/*
 * Generate a sysfs show function for one counter.  The read is done
 * under RCU since devdata may be replaced concurrently.
 */
#define nx842_counter_read(_name) \
static ssize_t nx842_##_name##_show(struct device *dev, \
		struct device_attribute *attr, \
		char *buf) { \
	struct nx842_devdata *local_devdata; \
	int p = 0; \
	rcu_read_lock(); \
	local_devdata = rcu_dereference(devdata); \
	if (local_devdata) \
		p = snprintf(buf, PAGE_SIZE, "%ld\n", \
		       atomic64_read(&local_devdata->counters->_name)); \
	rcu_read_unlock(); \
	return p; \
}

/* Declare a read-only device attribute backed by the generated show fn. */
#define NX842DEV_COUNTER_ATTR_RO(_name) \
	nx842_counter_read(_name); \
	static struct device_attribute dev_attr_##_name = __ATTR(_name, \
						0444, \
						nx842_##_name##_show,\
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

/* Histogram attributes share one show function; forward-declare it. */
static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);
911
912static ssize_t nx842_timehist_show(struct device *dev,
913 struct device_attribute *attr, char *buf) {
914 char *p = buf;
915 struct nx842_devdata *local_devdata;
916 atomic64_t *times;
917 int bytes_remain = PAGE_SIZE;
918 int bytes;
919 int i;
920
921 rcu_read_lock();
922 local_devdata = rcu_dereference(devdata);
923 if (!local_devdata) {
924 rcu_read_unlock();
925 return 0;
926 }
927
928 if (attr == &dev_attr_comp_times)
929 times = local_devdata->counters->comp_times;
930 else if (attr == &dev_attr_decomp_times)
931 times = local_devdata->counters->decomp_times;
932 else {
933 rcu_read_unlock();
934 return 0;
935 }
936
937 for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
938 bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
939 i ? (2<<(i-1)) : 0, (2<<i)-1,
940 atomic64_read(×[i]));
941 bytes_remain -= bytes;
942 p += bytes;
943 }
944
945
946 bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
947 2<<(NX842_HIST_SLOTS - 2),
948 atomic64_read(×[(NX842_HIST_SLOTS - 1)]));
949 p += bytes;
950
951 rcu_read_unlock();
952 return p - buf;
953}
954
/* sysfs attributes exposed on the VIO device (counters + histograms). */
static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static struct attribute_group nx842_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = nx842_sysfs_entries,
};
970
/* Hooks handed to the generic 842 crypto glue (nx-842.c). */
static struct nx842_driver nx842_pseries_driver = {
	.name =		KBUILD_MODNAME,
	.owner =	THIS_MODULE,
	.workmem_size =	sizeof(struct nx842_workmem),
	.constraints =	&nx842_pseries_constraints,
	.compress =	nx842_pseries_compress,
	.decompress =	nx842_pseries_decompress,
};
979
/* crypto_alg .cra_init hook: bind the generic 842 glue to this driver. */
static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_pseries_driver);
}
984
/* The "842" compression algorithm as registered with the crypto API. */
static struct crypto_alg nx842_pseries_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,	/* prefer hardware over sw-842 */
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_pseries_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
	.coa_compress		= nx842_crypto_compress,
	.coa_decompress		= nx842_crypto_decompress } }
};
998
/*
 * nx842_probe - VIO bus probe: publish a fresh devdata, register the
 * OF reconfig notifier, pull limits from the device tree, and register
 * the "842-nx" crypto alg plus the sysfs counter group.
 * Only a single hardware instance is supported.
 */
static int nx842_probe(struct vio_dev *viodev,
		const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
			GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		/* NOTE(review): -1 is not an errno value; -EEXIST would fit */
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	/* Publish the new devdata; free the placeholder from module init. */
	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	/*
	 * NOTE(review): on this path the OF notifier stays registered and
	 * devdata keeps pointing at the new structure -- confirm whether
	 * the remove path is relied upon to clean these up.
	 */
	return ret;
}
1070
/*
 * VIO bus remove: tear down sysfs, the crypto alg, the OF notifier,
 * and free the published devdata (including the shared counters).
 */
static int nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	/* Wait for in-flight RCU readers before freeing what they may hold. */
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);

	return 0;
}
1095
1096static struct vio_device_id nx842_vio_driver_ids[] = {
1097 {"ibm,compression-v1", "ibm,compression"},
1098 {"", ""},
1099};
1100
/* VIO bus driver registration for the NX-842 coprocessor. */
static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};
1108
1109static int __init nx842_pseries_init(void)
1110{
1111 struct nx842_devdata *new_devdata;
1112 int ret;
1113
1114 if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
1115 return -ENODEV;
1116
1117 RCU_INIT_POINTER(devdata, NULL);
1118 new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
1119 if (!new_devdata) {
1120 pr_err("Could not allocate memory for device data\n");
1121 return -ENOMEM;
1122 }
1123 RCU_INIT_POINTER(devdata, new_devdata);
1124
1125 ret = vio_register_driver(&nx842_vio_driver);
1126 if (ret) {
1127 pr_err("Could not register VIO driver %d\n", ret);
1128
1129 kfree(new_devdata);
1130 return ret;
1131 }
1132
1133 return 0;
1134}
1135
1136module_init(nx842_pseries_init);
1137
/* Module unload: unregister the alg/driver and free any leftover devdata. */
static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	/* Wait for in-flight RCU readers before freeing. */
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	/* NOTE(review): counters are freed only in nx842_remove() -- confirm
	 * the device is always unbound before module exit. */
	kfree(old_devdata);
	vio_unregister_driver(&nx842_vio_driver);
}
1158
1159