1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <asm/vio.h>
25
26#include "nx-842.h"
27#include "nx_csbcpb.h"
28
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
31MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
32MODULE_ALIAS_CRYPTO("842");
33MODULE_ALIAS_CRYPTO("842-nx");
34
/*
 * Buffer constraints enforced by check_constraints() for this hardware.
 * Not const: .maximum is raised at runtime to the device-tree
 * "ibm,max-sync-cop" limit by nx842_OF_upd_maxsyncop().
 */
static struct nx842_constraints nx842_pseries_constraints = {
	.alignment =	DDE_BUFFER_ALIGN,	/* required start alignment */
	.multiple =	DDE_BUFFER_LAST_MULT,	/* length granularity */
	.minimum =	DDE_BUFFER_LAST_MULT,	/* smallest usable length */
	.maximum =	PAGE_SIZE,		/* updated from device tree */
};
41
42static int check_constraints(unsigned long buf, unsigned int *len, bool in)
43{
44 if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
45 pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
46 in ? "input" : "output", buf,
47 nx842_pseries_constraints.alignment);
48 return -EINVAL;
49 }
50 if (*len % nx842_pseries_constraints.multiple) {
51 pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
52 in ? "input" : "output", *len,
53 nx842_pseries_constraints.multiple);
54 if (in)
55 return -EINVAL;
56 *len = round_down(*len, nx842_pseries_constraints.multiple);
57 }
58 if (*len < nx842_pseries_constraints.minimum) {
59 pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
60 in ? "input" : "output", *len,
61 nx842_pseries_constraints.minimum);
62 return -EINVAL;
63 }
64 if (*len > nx842_pseries_constraints.maximum) {
65 pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
66 in ? "input" : "output", *len,
67 nx842_pseries_constraints.maximum);
68 if (in)
69 return -EINVAL;
70 *len = nx842_pseries_constraints.maximum;
71 }
72 return 0;
73}
74
75
#define WORKMEM_ALIGN	(256)

/*
 * Per-call scratch memory.  The core driver hands us raw memory of
 * workmem_size bytes; nx842_pseries_compress/decompress re-align it with
 * PTR_ALIGN(wmem, WORKMEM_ALIGN), hence the trailing padding slack.
 */
struct nx842_workmem {
	/* scatterlist entry tables for input and output, one HW page each */
	char slin[4096];
	char slout[4096];

	/* status/parameter block shared with the coprocessor */
	struct nx_csbcpb csbcpb;

	/* room for PTR_ALIGN to advance the start by up to WORKMEM_ALIGN-1 */
	char padding[WORKMEM_ALIGN];
} __aligned(WORKMEM_ALIGN);
87
88
89
/* CSB 'valid' field: bit 7 is set once the HW has written the status block */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/*
 * CSB completion-extension condition bits.  CE2 set indicates that
 * processed_byte_count refers to the target buffer (see
 * nx842_validate_result()); CE0/CE1 semantics per the NX coprocessor
 * spec -- not interpreted by this driver.
 */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* The coprocessor works on 4K hardware pages, independent of PAGE_SIZE */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))
103
/* Statistics exported through sysfs; updated from the compress/decompress
 * fast paths, so all fields are atomic. */
struct ibm_nx842_counters {
	atomic64_t comp_complete;	/* successful compressions */
	atomic64_t comp_failed;		/* failed compressions */
	atomic64_t decomp_complete;	/* successful decompressions */
	atomic64_t decomp_failed;	/* failed decompressions */
	atomic64_t swdecomp;		/* software-fallback decompressions */
	atomic64_t comp_times[32];	/* log2 latency histogram (us) */
	atomic64_t decomp_times[32];	/* log2 latency histogram (us) */
};
113
/*
 * Singleton device state, published via RCU: readers use
 * rcu_read_lock()/rcu_dereference(devdata); writers replace the whole
 * structure under devdata_mutex (a spinlock despite the name) and then
 * synchronize_rcu() before freeing the old copy.
 */
static struct nx842_devdata {
	struct vio_dev *vdev;		/* VIO device handle */
	struct device *dev;		/* generic device, for logging/sysfs */
	struct ibm_nx842_counters *counters;	/* shared across replacements */
	unsigned int max_sg_len;	/* from "ibm,max-sg-len" property */
	unsigned int max_sync_size;	/* from "ibm,max-sync-cop" property */
	unsigned int max_sync_sg;	/* from "ibm,max-sync-cop" property */
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);
123
/* Generate nx842_inc_<name>() helpers that bump one counter, tolerating a
 * NULL devdata (no device bound yet). */
#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);
135
136#define NX842_HIST_SLOTS 16
137
138static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
139{
140 int bucket = fls(time);
141
142 if (bucket)
143 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
144
145 atomic64_inc(×[bucket]);
146}
147
148
/* Operation codes / flags for vio_pfo_op.flags as accepted by the NX unit */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1	/* OR-ed in to request CRC checking */
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)	/* asynchronous completion */
#define NX842_OP_NOTIFY		(1<<22)	/* interrupt on completion */
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)	/* interrupt number */
157
/**
 * nx842_get_desired_dma - report DMA entitlement desired by this device
 * @viodev: the VIO device in question
 *
 * This driver performs no DMA mappings of its own, so it requests no
 * CMO entitlement from the hypervisor.
 */
static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}
163
/* One hardware scatterlist entry: physical address + length, big-endian
 * as consumed by the coprocessor. */
struct nx842_slentry {
	__be64 ptr;	/* real (physical) address */
	__be64 len;	/* length in bytes */
};

/* In-memory scatterlist descriptor: entry table plus its populated count */
struct nx842_scatterlist {
	int entry_nr;			/* number of entries in use */
	struct nx842_slentry *entries;	/* ptr to entry table */
};
174
175
176static inline unsigned long nx842_get_scatterlist_size(
177 struct nx842_scatterlist *sl)
178{
179 return sl->entry_nr * sizeof(struct nx842_slentry);
180}
181
182static int nx842_build_scatterlist(unsigned long buf, int len,
183 struct nx842_scatterlist *sl)
184{
185 unsigned long entrylen;
186 struct nx842_slentry *entry;
187
188 sl->entry_nr = 0;
189
190 entry = sl->entries;
191 while (len) {
192 entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
193 entrylen = min_t(int, len,
194 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
195 entry->len = cpu_to_be64(entrylen);
196
197 len -= entrylen;
198 buf += entrylen;
199
200 sl->entry_nr++;
201 entry++;
202 }
203
204 return 0;
205}
206
/**
 * nx842_validate_result - check the hardware-written status block
 * @dev: device for logging
 * @csb: the coprocessor status block to examine
 *
 * Returns 0 on success, -ENOSPC if the output buffer was too small,
 * -EINVAL for corrupt/invalid input data, -EIO for other HW failures.
 */
static int nx842_validate_result(struct device *dev,
			struct cop_status_block *csb)
{
	/* No other CSB field may be trusted until the valid bit is set */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: cspcbp not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				be32_to_cpu(csb->processed_byte_count),
				(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Map the hardware completion code onto an errno */
	switch (csb->completion_code) {
	case 0:	/* success */
		break;
	case 64: /* compression ok, but compressed larger than original */
		dev_dbg(dev, "%s: output size larger than input size\n",
					__func__);
		break;
	case 13: /* output buffer exhausted mid-operation */
		dev_dbg(dev, "%s: Out of space in output buffer\n",
					__func__);
		return -ENOSPC;
	case 65: /* CRC over decompressed data did not match */
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
					__func__);
		return -EINVAL;
	case 66: /* fallthrough: both mean undecodable input */
	case 67:
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* Without CE2, processed_byte_count does not describe the target
	 * buffer, so the caller cannot know how much output is valid. */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcbp->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
291 unsigned char *out, unsigned int *outlen,
292 void *wmem)
293{
294 struct nx842_devdata *local_devdata;
295 struct device *dev = NULL;
296 struct nx842_workmem *workmem;
297 struct nx842_scatterlist slin, slout;
298 struct nx_csbcpb *csbcpb;
299 int ret = 0, max_sync_size;
300 unsigned long inbuf, outbuf;
301 struct vio_pfo_op op = {
302 .done = NULL,
303 .handle = 0,
304 .timeout = 0,
305 };
306 unsigned long start = get_tb();
307
308 inbuf = (unsigned long)in;
309 if (check_constraints(inbuf, &inlen, true))
310 return -EINVAL;
311
312 outbuf = (unsigned long)out;
313 if (check_constraints(outbuf, outlen, false))
314 return -EINVAL;
315
316 rcu_read_lock();
317 local_devdata = rcu_dereference(devdata);
318 if (!local_devdata || !local_devdata->dev) {
319 rcu_read_unlock();
320 return -ENODEV;
321 }
322 max_sync_size = local_devdata->max_sync_size;
323 dev = local_devdata->dev;
324
325
326 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
327 slin.entries = (struct nx842_slentry *)workmem->slin;
328 slout.entries = (struct nx842_slentry *)workmem->slout;
329
330
331 op.flags = NX842_OP_COMPRESS_CRC;
332 csbcpb = &workmem->csbcpb;
333 memset(csbcpb, 0, sizeof(*csbcpb));
334 op.csbcpb = nx842_get_pa(csbcpb);
335
336 if ((inbuf & NX842_HW_PAGE_MASK) ==
337 ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
338
339 op.in = nx842_get_pa((void *)inbuf);
340 op.inlen = inlen;
341 } else {
342
343 nx842_build_scatterlist(inbuf, inlen, &slin);
344 op.in = nx842_get_pa(slin.entries);
345 op.inlen = -nx842_get_scatterlist_size(&slin);
346 }
347
348 if ((outbuf & NX842_HW_PAGE_MASK) ==
349 ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
350
351 op.out = nx842_get_pa((void *)outbuf);
352 op.outlen = *outlen;
353 } else {
354
355 nx842_build_scatterlist(outbuf, *outlen, &slout);
356 op.out = nx842_get_pa(slout.entries);
357 op.outlen = -nx842_get_scatterlist_size(&slout);
358 }
359
360 dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
361 __func__, (unsigned long)op.in, (long)op.inlen,
362 (unsigned long)op.out, (long)op.outlen);
363
364
365 ret = vio_h_cop_sync(local_devdata->vdev, &op);
366
367
368 if (ret) {
369 dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
370 __func__, ret, op.hcall_err);
371 ret = -EIO;
372 goto unlock;
373 }
374
375
376 ret = nx842_validate_result(dev, &csbcpb->csb);
377 if (ret)
378 goto unlock;
379
380 *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
381 dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);
382
383unlock:
384 if (ret)
385 nx842_inc_comp_failed(local_devdata);
386 else {
387 nx842_inc_comp_complete(local_devdata);
388 ibm_nx842_incr_hist(local_devdata->counters->comp_times,
389 (get_tb() - start) / tb_ticks_per_usec);
390 }
391 rcu_read_unlock();
392 return ret;
393}
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
422 unsigned char *out, unsigned int *outlen,
423 void *wmem)
424{
425 struct nx842_devdata *local_devdata;
426 struct device *dev = NULL;
427 struct nx842_workmem *workmem;
428 struct nx842_scatterlist slin, slout;
429 struct nx_csbcpb *csbcpb;
430 int ret = 0, max_sync_size;
431 unsigned long inbuf, outbuf;
432 struct vio_pfo_op op = {
433 .done = NULL,
434 .handle = 0,
435 .timeout = 0,
436 };
437 unsigned long start = get_tb();
438
439
440 inbuf = (unsigned long)in;
441 if (check_constraints(inbuf, &inlen, true))
442 return -EINVAL;
443
444 outbuf = (unsigned long)out;
445 if (check_constraints(outbuf, outlen, false))
446 return -EINVAL;
447
448 rcu_read_lock();
449 local_devdata = rcu_dereference(devdata);
450 if (!local_devdata || !local_devdata->dev) {
451 rcu_read_unlock();
452 return -ENODEV;
453 }
454 max_sync_size = local_devdata->max_sync_size;
455 dev = local_devdata->dev;
456
457 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
458
459
460 slin.entries = (struct nx842_slentry *)workmem->slin;
461 slout.entries = (struct nx842_slentry *)workmem->slout;
462
463
464 op.flags = NX842_OP_DECOMPRESS_CRC;
465 csbcpb = &workmem->csbcpb;
466 memset(csbcpb, 0, sizeof(*csbcpb));
467 op.csbcpb = nx842_get_pa(csbcpb);
468
469 if ((inbuf & NX842_HW_PAGE_MASK) ==
470 ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
471
472 op.in = nx842_get_pa((void *)inbuf);
473 op.inlen = inlen;
474 } else {
475
476 nx842_build_scatterlist(inbuf, inlen, &slin);
477 op.in = nx842_get_pa(slin.entries);
478 op.inlen = -nx842_get_scatterlist_size(&slin);
479 }
480
481 if ((outbuf & NX842_HW_PAGE_MASK) ==
482 ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
483
484 op.out = nx842_get_pa((void *)outbuf);
485 op.outlen = *outlen;
486 } else {
487
488 nx842_build_scatterlist(outbuf, *outlen, &slout);
489 op.out = nx842_get_pa(slout.entries);
490 op.outlen = -nx842_get_scatterlist_size(&slout);
491 }
492
493 dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
494 __func__, (unsigned long)op.in, (long)op.inlen,
495 (unsigned long)op.out, (long)op.outlen);
496
497
498 ret = vio_h_cop_sync(local_devdata->vdev, &op);
499
500
501 if (ret) {
502 dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
503 __func__, ret, op.hcall_err);
504 goto unlock;
505 }
506
507
508 ret = nx842_validate_result(dev, &csbcpb->csb);
509 if (ret)
510 goto unlock;
511
512 *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
513
514unlock:
515 if (ret)
516
517 nx842_inc_decomp_failed(local_devdata);
518 else {
519 nx842_inc_decomp_complete(local_devdata);
520 ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
521 (get_tb() - start) / tb_ticks_per_usec);
522 }
523
524 rcu_read_unlock();
525 return ret;
526}
527
528
529
530
531
532
533
534
535
536
537static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
538{
539 if (devdata) {
540 devdata->max_sync_size = 0;
541 devdata->max_sync_sg = 0;
542 devdata->max_sg_len = 0;
543 return 0;
544 } else
545 return -ENOENT;
546}
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562static int nx842_OF_upd_status(struct property *prop)
563{
564 const char *status = (const char *)prop->value;
565
566 if (!strncmp(status, "okay", (size_t)prop->length))
567 return 0;
568 if (!strncmp(status, "disabled", (size_t)prop->length))
569 return -ENODEV;
570 dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
571
572 return -EINVAL;
573}
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
597 struct property *prop) {
598 int ret = 0;
599 const unsigned int maxsglen = of_read_number(prop->value, 1);
600
601 if (prop->length != sizeof(maxsglen)) {
602 dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
603 dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
604 prop->length, sizeof(maxsglen));
605 ret = -EINVAL;
606 } else {
607 devdata->max_sg_len = min_t(unsigned int,
608 maxsglen, NX842_HW_PAGE_SIZE);
609 }
610
611 return ret;
612}
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
/**
 * nx842_OF_upd_maxsyncop - update sync-op limits from "ibm,max-sync-cop"
 * @devdata: device data structure to update
 * @prop: the "ibm,max-sync-cop" property (six 32-bit cells expected)
 *
 * Derives max_sync_size and max_sync_sg from the more restrictive of
 * the compression and decompression limits, and publishes the data
 * limit as the constraints maximum.  Returns 0 on success, -EINVAL on
 * a malformed property or limits below the driver minimums.
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	/* Property layout: three limits each for compression and
	 * decompression, big-endian cells. */
	const struct maxsynccop_t {
		__be32 comp_elements;
		__be32 comp_data_limit;
		__be32 comp_sg_limit;
		__be32 decomp_elements;
		__be32 decomp_data_limit;
		__be32 decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
				sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/* Use the more restrictive of the compression and decompression
	 * data limits so one max_sync_size covers both directions. */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	/* Cap at 64K regardless of what the hardware advertises */
	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
					65536);

	/* The driver needs at least one 4K page per operation */
	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	/* Publish the limit to the constraints used by check_constraints() */
	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725static int nx842_OF_upd(struct property *new_prop)
726{
727 struct nx842_devdata *old_devdata = NULL;
728 struct nx842_devdata *new_devdata = NULL;
729 struct device_node *of_node = NULL;
730 struct property *status = NULL;
731 struct property *maxsglen = NULL;
732 struct property *maxsyncop = NULL;
733 int ret = 0;
734 unsigned long flags;
735
736 new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
737 if (!new_devdata)
738 return -ENOMEM;
739
740 spin_lock_irqsave(&devdata_mutex, flags);
741 old_devdata = rcu_dereference_check(devdata,
742 lockdep_is_held(&devdata_mutex));
743 if (old_devdata)
744 of_node = old_devdata->dev->of_node;
745
746 if (!old_devdata || !of_node) {
747 pr_err("%s: device is not available\n", __func__);
748 spin_unlock_irqrestore(&devdata_mutex, flags);
749 kfree(new_devdata);
750 return -ENODEV;
751 }
752
753 memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
754 new_devdata->counters = old_devdata->counters;
755
756
757 status = of_find_property(of_node, "status", NULL);
758 maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
759 maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
760 if (!status || !maxsglen || !maxsyncop) {
761 dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
762 ret = -EINVAL;
763 goto error_out;
764 }
765
766
767
768
769
770 if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
771 strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
772 strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
773 goto out;
774
775
776 ret = nx842_OF_upd_status(status);
777 if (ret)
778 goto error_out;
779
780 ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
781 if (ret)
782 goto error_out;
783
784 ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
785 if (ret)
786 goto error_out;
787
788out:
789 dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
790 __func__, new_devdata->max_sync_size,
791 old_devdata->max_sync_size);
792 dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
793 __func__, new_devdata->max_sync_sg,
794 old_devdata->max_sync_sg);
795 dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
796 __func__, new_devdata->max_sg_len,
797 old_devdata->max_sg_len);
798
799 rcu_assign_pointer(devdata, new_devdata);
800 spin_unlock_irqrestore(&devdata_mutex, flags);
801 synchronize_rcu();
802 dev_set_drvdata(new_devdata->dev, new_devdata);
803 kfree(old_devdata);
804 return 0;
805
806error_out:
807 if (new_devdata) {
808 dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
809 nx842_OF_set_defaults(new_devdata);
810 rcu_assign_pointer(devdata, new_devdata);
811 spin_unlock_irqrestore(&devdata_mutex, flags);
812 synchronize_rcu();
813 dev_set_drvdata(new_devdata->dev, new_devdata);
814 kfree(old_devdata);
815 } else {
816 dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
817 spin_unlock_irqrestore(&devdata_mutex, flags);
818 }
819
820 if (!ret)
821 ret = -EINVAL;
822 return ret;
823}
824
825
826
827
828
829
830
831
832
833
834
835
836
837
/**
 * nx842_OF_notifier - OF reconfiguration notifier callback
 * @np: notifier block
 * @action: notifier action (only OF_RECONFIG_UPDATE_PROPERTY handled)
 * @data: struct of_reconfig_data describing the change
 *
 * Triggers nx842_OF_upd() when a property of our device node changes.
 * Always returns NOTIFY_OK.
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *data)
{
	struct of_reconfig_data *upd = data;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	/* NOTE(review): if dev->of_node were NULL, node->name below would
	 * dereference NULL despite local_devdata being set -- confirm that
	 * probe guarantees a non-NULL of_node. */
	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}
860
/* Registered with of_reconfig_notifier_register() in nx842_probe() */
static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};
864
/* Generate a sysfs show() function printing one atomic64 counter;
 * returns 0 bytes (empty file) when no device is bound. */
#define nx842_counter_read(_name)					\
static ssize_t nx842_##_name##_show(struct device *dev,		\
		struct device_attribute *attr,				\
		char *buf) {						\
	struct nx842_devdata *local_devdata;			\
	int p = 0;							\
	rcu_read_lock();						\
	local_devdata = rcu_dereference(devdata);			\
	if (local_devdata)						\
		p = snprintf(buf, PAGE_SIZE, "%ld\n",			\
		       atomic64_read(&local_devdata->counters->_name));	\
	rcu_read_unlock();						\
	return p;							\
}
879
/* Generate the show() function and a matching read-only (0444)
 * device attribute for one counter. */
#define NX842DEV_COUNTER_ATTR_RO(_name)					\
	nx842_counter_read(_name);					\
	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
						0444,			\
						nx842_##_name##_show,	\
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);
892
/* One shared show() serves both histogram files; it distinguishes them
 * by comparing the attribute pointer, hence the forward declaration. */
static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);
900
901static ssize_t nx842_timehist_show(struct device *dev,
902 struct device_attribute *attr, char *buf) {
903 char *p = buf;
904 struct nx842_devdata *local_devdata;
905 atomic64_t *times;
906 int bytes_remain = PAGE_SIZE;
907 int bytes;
908 int i;
909
910 rcu_read_lock();
911 local_devdata = rcu_dereference(devdata);
912 if (!local_devdata) {
913 rcu_read_unlock();
914 return 0;
915 }
916
917 if (attr == &dev_attr_comp_times)
918 times = local_devdata->counters->comp_times;
919 else if (attr == &dev_attr_decomp_times)
920 times = local_devdata->counters->decomp_times;
921 else {
922 rcu_read_unlock();
923 return 0;
924 }
925
926 for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
927 bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
928 i ? (2<<(i-1)) : 0, (2<<i)-1,
929 atomic64_read(×[i]));
930 bytes_remain -= bytes;
931 p += bytes;
932 }
933
934
935 bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
936 2<<(NX842_HIST_SLOTS - 2),
937 atomic64_read(×[(NX842_HIST_SLOTS - 1)]));
938 p += bytes;
939
940 rcu_read_unlock();
941 return p - buf;
942}
943
/* All sysfs files exposed for the device: five counters plus the two
 * latency histograms. */
static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};
954
955static struct attribute_group nx842_attribute_group = {
956 .name = NULL,
957 .attrs = nx842_sysfs_entries,
958};
959
/* Hook this platform's ops and constraints into the common nx-842 core */
static struct nx842_driver nx842_pseries_driver = {
	.name =		KBUILD_MODNAME,
	.owner =	THIS_MODULE,
	.workmem_size =	sizeof(struct nx842_workmem),
	.constraints =	&nx842_pseries_constraints,
	.compress =	nx842_pseries_compress,
	.decompress =	nx842_pseries_decompress,
};
968
/* crypto_alg init hook: bind the common 842 crypto glue to this driver */
static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_pseries_driver);
}
973
/* "842" compression algorithm registration; priority 300 so the HW
 * implementation is preferred over the generic software one. */
static struct crypto_alg nx842_pseries_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_pseries_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
	.coa_compress		= nx842_crypto_compress,
	.coa_decompress		= nx842_crypto_decompress } }
};
987
/**
 * nx842_probe - bind a VIO compression device to the driver
 * @viodev: the VIO device
 * @id: matched device id entry
 *
 * Allocates devdata + counters, publishes them via RCU, registers the
 * OF reconfig notifier, pulls the limits from the device tree, then
 * registers the crypto alg and sysfs files.  Only one HW instance is
 * supported.  Returns 0 on success, negative (or -1) on failure.
 */
static int nx842_probe(struct vio_dev *viodev,
			const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
			GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	/* Only a single hardware instance may be bound at a time */
	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;	/* NOTE(review): -EEXIST would be clearer */
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	/* Swap in the new devdata; the old placeholder (from module init)
	 * is freed after a grace period. */
	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	/* NOTE(review): failures after of_reconfig_notifier_register()
	 * leave the notifier registered and devdata allocated until
	 * nx842_remove()/module exit -- confirm this is intentional. */
	return ret;
}
1059
/**
 * nx842_remove - unbind the VIO compression device
 * @viodev: the VIO device being removed
 *
 * Tears down sysfs and the crypto alg, unregisters the OF notifier,
 * clears the RCU-published devdata and frees it (and its counters)
 * after a grace period.  Always returns 0.
 */
static int nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);	/* kfree(NULL) is a no-op */

	return 0;
}
1084
/* VIO device-tree match table; terminated by the empty entry */
static const struct vio_device_id nx842_vio_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};
1089
/* VIO bus driver registration for the NX-842 coprocessor */
static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};
1097
1098static int __init nx842_pseries_init(void)
1099{
1100 struct nx842_devdata *new_devdata;
1101 int ret;
1102
1103 if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
1104 return -ENODEV;
1105
1106 RCU_INIT_POINTER(devdata, NULL);
1107 new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
1108 if (!new_devdata)
1109 return -ENOMEM;
1110
1111 RCU_INIT_POINTER(devdata, new_devdata);
1112
1113 ret = vio_register_driver(&nx842_vio_driver);
1114 if (ret) {
1115 pr_err("Could not register VIO driver %d\n", ret);
1116
1117 kfree(new_devdata);
1118 return ret;
1119 }
1120
1121 return 0;
1122}
1123
1124module_init(nx842_pseries_init);
1125
/**
 * nx842_pseries_exit - module exit: tear down crypto, devdata, driver
 *
 * Unregisters the crypto alg, retires the RCU-published devdata after a
 * grace period, then unregisters the VIO driver (which invokes
 * nx842_remove() for any still-bound device).
 */
static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);	/* counters freed by nx842_remove() */
	vio_unregister_driver(&nx842_vio_driver);
}
1144
1145module_exit(nx842_pseries_exit);
1146
1147