// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for IBM Power 842 compression accelerator
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Author: Robert Jennings <rcj@linux.vnet.ibm.com>
 */

#include <asm/vio.h>
#include <asm/hvcall.h>
#include <asm/vas.h>

#include "nx-842.h"
#include "nx_csbcpb.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");

/*
 * Coprocessor type specific capabilities as returned by the hypervisor.
 */
struct hv_nx_cop_caps {
	__be64 descriptor;
	__be64 req_max_processed_len;	/* Max bytes in one GZIP request */
	__be64 min_compress_len;	/* Min compression size in bytes */
	__be64 min_decompress_len;	/* Min decompression size in bytes */
} __packed __aligned(0x1000);

/*
 * Coprocessor type specific capabilities, converted to CPU endianness.
 */
struct nx_cop_caps {
	u64 descriptor;
	u64 req_max_processed_len;	/* Max bytes in one GZIP request */
	u64 min_compress_len;		/* Min compression size in bytes */
	u64 min_decompress_len;		/* Min decompression size in bytes */
};

static u64 caps_feat;
static struct nx_cop_caps nx_cop_caps;

static struct nx842_constraints nx842_pseries_constraints = {
	.alignment = DDE_BUFFER_ALIGN,
	.multiple = DDE_BUFFER_LAST_MULT,
	.minimum = DDE_BUFFER_LAST_MULT,
	.maximum = PAGE_SIZE,
};
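/*
 * Note: .maximum starts out as PAGE_SIZE but is replaced by the device's
 * max_sync_size once nx842_OF_upd_maxsyncop() has parsed the
 * "ibm,max-sync-cop" device tree property (see below).
 */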

static int check_constraints(unsigned long buf, unsigned int *len, bool in)
{
	if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
		pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
			 in ? "input" : "output", buf,
			 nx842_pseries_constraints.alignment);
		return -EINVAL;
	}
	if (*len % nx842_pseries_constraints.multiple) {
		pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.multiple);
		if (in)
			return -EINVAL;
		*len = round_down(*len, nx842_pseries_constraints.multiple);
	}
	if (*len < nx842_pseries_constraints.minimum) {
		pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.minimum);
		return -EINVAL;
	}
	if (*len > nx842_pseries_constraints.maximum) {
		pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.maximum);
		if (in)
			return -EINVAL;
		*len = nx842_pseries_constraints.maximum;
	}
	return 0;
}

#define WORKMEM_ALIGN	(256)

struct nx842_workmem {
	/* scatterlist areas for input and output */
	char slin[4096];
	char slout[4096];
	/* coprocessor status/parameter block */
	struct nx_csbcpb csbcpb;

	char padding[WORKMEM_ALIGN]; /* unused, allows re-alignment */
} __aligned(WORKMEM_ALIGN);
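/*
 * The driver advertises workmem_size = sizeof(struct nx842_workmem) (see
 * nx842_pseries_driver below).  Callers hand in an opaque wmem buffer that
 * compress/decompress re-align with PTR_ALIGN(wmem, WORKMEM_ALIGN), so the
 * trailing padding ensures the aligned structure still fits in that buffer.
 */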

/* Macros for fields within nx_csbcpb */
/* Check the valid bit within the csbcpb valid field */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/*
 * CE macros operate on the completion_extension field bits in the csbcpb.
 * CE2 set indicates that csb->processed_byte_count refers to the target
 * buffer (see nx842_validate_result()); CE0/CE1 report completion status.
 */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* The hardware operates on 4K pages; buffers crossing a 4K boundary need a
 * scatterlist (see nx842_build_scatterlist()).
 */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))

struct ibm_nx842_counters {
	atomic64_t comp_complete;
	atomic64_t comp_failed;
	atomic64_t decomp_complete;
	atomic64_t decomp_failed;
	atomic64_t swdecomp;
	atomic64_t comp_times[32];
	atomic64_t decomp_times[32];
};

static struct nx842_devdata {
	struct vio_dev *vdev;
	struct device *dev;
	struct ibm_nx842_counters *counters;
	unsigned int max_sg_len;	/* max scatterlist bytes */
	unsigned int max_sync_size;	/* max bytes per synchronous op */
	unsigned int max_sync_sg;	/* max sg entries per synchronous op */
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);

#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);
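/*
 * The NX842_COUNTER_INC() expansions above define nx842_inc_comp_complete(),
 * nx842_inc_comp_failed(), nx842_inc_decomp_complete(),
 * nx842_inc_decomp_failed() and nx842_inc_swdecomp(), each bumping the
 * matching field in devdata->counters when devdata is non-NULL.
 */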

#define NX842_HIST_SLOTS 16

static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
	int bucket = fls(time);

	if (bucket)
		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

	atomic64_inc(&times[bucket]);
}
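/*
 * Bucketing sketch: with fls(), bucket i counts operations that took roughly
 * [2^i, 2^(i+1)) time units (microseconds as passed in by the callers),
 * bucket 0 covers 0-1us, and anything at or beyond 2^(NX842_HIST_SLOTS - 1)
 * units lands in the last bucket.
 */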

/* NX coprocessor operation flags; this driver only issues the synchronous
 * COMPRESS_CRC and DECOMPRESS_CRC operations.
 */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)
#define NX842_OP_NOTIFY		(1<<22)
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)

static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}

struct nx842_slentry {
	__be64 ptr;	/* Real address (use nx842_get_pa()) */
	__be64 len;
};

/* pHyp scatterlist entry */
struct nx842_scatterlist {
	int entry_nr;			/* number of slentries */
	struct nx842_slentry *entries;	/* ptr to array of slentries */
};

/* Does not include sizeof(entry_nr) in the size */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}

static int nx842_build_scatterlist(unsigned long buf, int len,
			struct nx842_scatterlist *sl)
{
	unsigned long entrylen;
	struct nx842_slentry *entry;

	sl->entry_nr = 0;

	entry = sl->entries;
	while (len) {
		entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
		entrylen = min_t(int, len,
				 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
		entry->len = cpu_to_be64(entrylen);

		len -= entrylen;
		buf += entrylen;

		sl->entry_nr++;
		entry++;
	}

	return 0;
}
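/*
 * Each pass of the loop above emits one 16-byte slentry that stops at the
 * next 4K hardware page boundary, so a buffer of N bytes needs at most
 * N/4K + 1 entries.  The callers point sl->entries at the 4K slin/slout
 * areas in struct nx842_workmem, which is ample for the sizes allowed by
 * nx842_pseries_constraints.
 */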

static int nx842_validate_result(struct device *dev,
			struct cop_status_block *csb)
{
	/* The csb must be valid after this call */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: csbcpb not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				be32_to_cpu(csb->processed_byte_count),
				(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Check return values from the hardware in the CSB */
	switch (csb->completion_code) {
	case 0:	/* Completed without error */
		break;
	case 64:
		dev_dbg(dev, "%s: output size larger than input size\n",
					__func__);
		break;
	case 13:
		dev_dbg(dev, "%s: Out of space in output buffer\n",
					__func__);
		return -ENOSPC;
	case 65:
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
					__func__);
		return -EINVAL;
	case 66:
	case 67:
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* processed_byte_count is only meaningful for the target buffer
	 * when CE2 is set.
	 */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcpb->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}

/**
 * nx842_pseries_compress - Compress data using the 842 algorithm
 *
 * Compression is provided by the NX842 coprocessor on IBM PowerPC systems.
 * The input buffer is compressed and the result is stored in the provided
 * output buffer.  On success @outlen is updated to the number of compressed
 * bytes written to @out.
 *
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlen: output buffer size on entry, bytes written on return
 * @wmem: working memory buffer pointer, at least
 *        nx842_pseries_driver.workmem_size bytes
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENODEV	Hardware unavailable
 *   -ENOSPC	Output buffer is too small
 *   -EINVAL	Buffer constraints do not fit nx842_pseries_constraints
 *   -EIO	Internal error
 */
static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int *outlen,
				  void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	/* Init scatterlist */
	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_COMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		ret = -EIO;
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);

unlock:
	if (ret)
		nx842_inc_comp_failed(local_devdata);
	else {
		nx842_inc_comp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * nx842_pseries_decompress - Decompress data using the 842 algorithm
 *
 * Decompression is provided by the NX842 coprocessor on IBM PowerPC systems.
 * The input buffer is decompressed and the result is stored in the provided
 * output buffer.  The size allocated to the output buffer is provided by the
 * caller in @outlen; upon return @outlen contains the length of the
 * decompressed data.
 *
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlen: output buffer size on entry, bytes written on return
 * @wmem: working memory buffer pointer, at least
 *        nx842_pseries_driver.workmem_size bytes
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENODEV	Hardware unavailable
 *   -ENOSPC	Output buffer is too small
 *   -EINVAL	Bad input data encountered when attempting decompress,
 *		or buffer constraints do not fit nx842_pseries_constraints
 *   -EIO	Internal error
 */
static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
				    unsigned char *out, unsigned int *outlen,
				    void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	/* Ensure alignment and size constraints are met */
	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);

	/* Init scatterlist */
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_DECOMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);

unlock:
	if (ret)
		/* decompress fail */
		nx842_inc_decomp_failed(local_devdata);
	else {
		nx842_inc_decomp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}

	rcu_read_unlock();
	return ret;
}

/**
 * nx842_OF_set_defaults -- Set default (disabled) values for devdata
 *
 * @devdata: struct nx842_devdata to update
 *
 * Returns:
 *  0 on success
 *  -ENOENT if @devdata ptr is NULL
 */
static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
{
	if (devdata) {
		devdata->max_sync_size = 0;
		devdata->max_sync_sg = 0;
		devdata->max_sg_len = 0;
		return 0;
	} else
		return -ENOENT;
}

/**
 * nx842_OF_upd_status -- Update the device info from the OF status property
 *
 * The presence of the device node in the OF tree indicates the hardware
 * exists; the status property indicates whether the accelerator may be
 * used.  The device is enabled only when the status is 'okay', otherwise
 * the driver is disabled.
 *
 * @devdata: struct nx842_devdata to use for dev_info
 * @prop: struct property containing the status value
 *
 * Returns:
 *  0 - Device is available
 *  -ENODEV - Device is not available
 *  -EINVAL - Unknown status value
 */
static int nx842_OF_upd_status(struct nx842_devdata *devdata,
			       struct property *prop)
{
	const char *status = (const char *)prop->value;

	if (!strncmp(status, "okay", (size_t)prop->length))
		return 0;
	if (!strncmp(status, "disabled", (size_t)prop->length))
		return -ENODEV;
	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);

	return -EINVAL;
}

/**
 * nx842_OF_upd_maxsglen -- Update the device info from the OF maxsglen prop
 *
 * The 'ibm,max-sg-len' OF property is a single 32-bit cell giving the
 * maximum byte length of a scatter list for the platform facility.
 *
 * @devdata: struct nx842_devdata to update
 * @prop: struct property containing the maxsglen value
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
				 struct property *prop)
{
	int ret = 0;
	const unsigned int maxsglen = of_read_number(prop->value, 1);

	if (prop->length != sizeof(maxsglen)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
			prop->length, sizeof(maxsglen));
		ret = -EINVAL;
	} else {
		devdata->max_sg_len = min_t(unsigned int,
					    maxsglen, NX842_HW_PAGE_SIZE);
	}

	return ret;
}
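/*
 * For illustration (hypothetical cell values): a property cell containing
 * 0x00001000 or larger would leave max_sg_len clamped at NX842_HW_PAGE_SIZE
 * (4096), while a cell containing 0x00000ff0 would set max_sg_len to 0xff0.
 */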

/**
 * nx842_OF_upd_maxsyncop -- Update the device info from the OF maxsyncop prop
 *
 * The 'ibm,max-sync-cop' OF property, as consumed below, consists of six
 * 32-bit cells: the element count, maximum data length and maximum scatter
 * list length for synchronous compression, followed by the same three
 * values for synchronous decompression.
 *
 * @devdata: struct nx842_devdata to update
 * @prop: struct property containing the maxsyncop values
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
				  struct property *prop)
{
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	const struct maxsynccop_t {
		__be32 comp_elements;
		__be32 comp_data_limit;
		__be32 comp_sg_limit;
		__be32 decomp_elements;
		__be32 decomp_data_limit;
		__be32 decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
			sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/* Use a single limit for both compression and decompression, capped
	 * at 64K; the hardware must support at least 4K or the device is
	 * unusable.
	 */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
				       65536);

	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
			"less than the driver minimum, unable to use "
			"the hardware device\n",
			__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
			"less than the driver minimum, unable to use "
			"the hardware device\n",
			__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}
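/*
 * Example (hypothetical cell values): comp_data_limit = decomp_data_limit =
 * 0x00010000 and comp_sg_limit = decomp_sg_limit = 0x00000510 would yield
 * max_sync_size = 65536 (which also becomes
 * nx842_pseries_constraints.maximum) and max_sync_sg = 0x510.
 */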

/**
 * nx842_OF_upd -- Handle OF property updates for the device.
 *
 * Set all properties from the OF tree.  Optionally, a new property can be
 * provided by the new_prop pointer to overwrite an existing value.  The
 * device will remain disabled until all values are valid; this function
 * returns an error for updates unless all values are valid.
 *
 * @new_prop: If not NULL, this property is being updated.  If NULL, update
 *  all properties from the current values in the OF tree.
 *
 * Returns:
 *  0 - Success
 *  -ENOMEM - Could not allocate memory for new devdata structure
 *  -EINVAL - Property value not found, new_prop is not a recognized property
 *	for the device, or a property value is not valid
 *  -ENODEV - Device is not available
 */
static int nx842_OF_upd(struct property *new_prop)
{
	struct nx842_devdata *old_devdata = NULL;
	struct nx842_devdata *new_devdata = NULL;
	struct device_node *of_node = NULL;
	struct property *status = NULL;
	struct property *maxsglen = NULL;
	struct property *maxsyncop = NULL;
	int ret = 0;
	unsigned long flags;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	if (old_devdata)
		of_node = old_devdata->dev->of_node;

	if (!old_devdata || !of_node) {
		pr_err("%s: device is not available\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		kfree(new_devdata);
		return -ENODEV;
	}

	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
	new_devdata->counters = old_devdata->counters;

	/* Set ptrs for existing properties */
	status = of_find_property(of_node, "status", NULL);
	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
	if (!status || !maxsglen || !maxsyncop) {
		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
		ret = -EINVAL;
		goto error_out;
	}

	/*
	 * If this is a property update, there are only certain properties that
	 * we care about.  Bail if it isn't in the list below.
	 */
	if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
			 strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
			 strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
		goto out;

	/* Perform property updates */
	ret = nx842_OF_upd_status(new_devdata, status);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
	if (ret)
		goto error_out;

out:
	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
			__func__, new_devdata->max_sync_size,
			old_devdata->max_sync_size);
	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
			__func__, new_devdata->max_sync_sg,
			old_devdata->max_sync_sg);
	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
			__func__, new_devdata->max_sg_len,
			old_devdata->max_sg_len);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(new_devdata->dev, new_devdata);
	kfree(old_devdata);
	return 0;

error_out:
	if (new_devdata) {
		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
		nx842_OF_set_defaults(new_devdata);
		rcu_assign_pointer(devdata, new_devdata);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		synchronize_rcu();
		dev_set_drvdata(new_devdata->dev, new_devdata);
		kfree(old_devdata);
	} else {
		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
	}

	if (!ret)
		ret = -EINVAL;
	return ret;
}

/**
 * nx842_OF_notifier - Process updates to OF properties for the device
 *
 * @np: notifier block
 * @action: notifier action
 * @data: struct of_reconfig_data pointer
 *
 * Returns:
 *	NOTIFY_OK on success
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *data)
{
	struct of_reconfig_data *upd = data;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};

#define nx842_counter_read(_name) \
static ssize_t nx842_##_name##_show(struct device *dev, \
		struct device_attribute *attr, \
		char *buf) { \
	struct nx842_devdata *local_devdata; \
	int p = 0; \
	rcu_read_lock(); \
	local_devdata = rcu_dereference(devdata); \
	if (local_devdata) \
		p = snprintf(buf, PAGE_SIZE, "%lld\n", \
		       atomic64_read(&local_devdata->counters->_name)); \
	rcu_read_unlock(); \
	return p; \
}

#define NX842DEV_COUNTER_ATTR_RO(_name) \
	nx842_counter_read(_name); \
	static struct device_attribute dev_attr_##_name = __ATTR(_name, \
						0444, \
						nx842_##_name##_show, \
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);

static ssize_t nx842_timehist_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *p = buf;
	struct nx842_devdata *local_devdata;
	atomic64_t *times;
	int bytes_remain = PAGE_SIZE;
	int bytes;
	int i;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata) {
		rcu_read_unlock();
		return 0;
	}

	if (attr == &dev_attr_comp_times)
		times = local_devdata->counters->comp_times;
	else if (attr == &dev_attr_decomp_times)
		times = local_devdata->counters->decomp_times;
	else {
		rcu_read_unlock();
		return 0;
	}

	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
			       i ? (2<<(i-1)) : 0, (2<<i)-1,
			       atomic64_read(&times[i]));
		bytes_remain -= bytes;
		p += bytes;
	}

	/* The last bucket holds everything over
	 * 2<<(NX842_HIST_SLOTS - 2) us */
	bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
			2<<(NX842_HIST_SLOTS - 2),
			atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
	p += bytes;

	rcu_read_unlock();
	return p - buf;
}
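/*
 * Reading these histograms from user space is a plain sysfs read, e.g.
 * (the device path is illustrative, it depends on the VIO device name):
 *
 *	$ cat /sys/devices/vio/<dev>/comp_times
 *	0-1us:	0
 *	2-3us:	0
 *	...
 *
 * The output format follows the snprintf() calls above.
 */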

static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static const struct attribute_group nx842_attribute_group = {
	.name = NULL,
	.attrs = nx842_sysfs_entries,
};

#define nxcop_caps_read(_name) \
static ssize_t nxcop_##_name##_show(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	return sprintf(buf, "%lld\n", nx_cop_caps._name); \
}

#define NXCT_ATTR_RO(_name) \
	nxcop_caps_read(_name); \
	static struct device_attribute dev_attr_##_name = __ATTR(_name, \
						0444, \
						nxcop_##_name##_show, \
						NULL);

NXCT_ATTR_RO(req_max_processed_len);
NXCT_ATTR_RO(min_compress_len);
NXCT_ATTR_RO(min_decompress_len);

static struct attribute *nxcop_caps_sysfs_entries[] = {
	&dev_attr_req_max_processed_len.attr,
	&dev_attr_min_compress_len.attr,
	&dev_attr_min_decompress_len.attr,
	NULL,
};

static const struct attribute_group nxcop_caps_attr_group = {
	.name = "nx_gzip_caps",
	.attrs = nxcop_caps_sysfs_entries,
};

static struct nx842_driver nx842_pseries_driver = {
	.name = KBUILD_MODNAME,
	.owner = THIS_MODULE,
	.workmem_size = sizeof(struct nx842_workmem),
	.constraints = &nx842_pseries_constraints,
	.compress = nx842_pseries_compress,
	.decompress = nx842_pseries_decompress,
};

static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_pseries_driver);
}

static struct crypto_alg nx842_pseries_alg = {
	.cra_name = "842",
	.cra_driver_name = "842-nx",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize = sizeof(struct nx842_crypto_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = nx842_pseries_crypto_init,
	.cra_exit = nx842_crypto_exit,
	.cra_u = { .compress = {
	.coa_compress = nx842_crypto_compress,
	.coa_decompress = nx842_crypto_decompress } }
};
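/*
 * Usage sketch (not part of this driver): once nx842_pseries_alg is
 * registered, in-kernel users can reach it through the generic crypto
 * compression API, for example:
 *
 *	struct crypto_comp *tfm = crypto_alloc_comp("842", 0, 0);
 *	unsigned int dlen = dst_len;
 *
 *	if (!IS_ERR(tfm)) {
 *		err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *		...
 *		crypto_free_comp(tfm);
 *	}
 *
 * The variable names above are illustrative; the helpers are the standard
 * crypto_comp API matching CRYPTO_ALG_TYPE_COMPRESS.
 */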

static int nx842_probe(struct vio_dev *viodev,
		       const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
					GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	if (caps_feat) {
		if (sysfs_create_group(&viodev->dev.kobj,
					&nxcop_caps_attr_group)) {
			dev_err(&viodev->dev,
				"Could not create sysfs NX capability entries\n");
			ret = -1;
			goto error;
		}
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	return ret;
}

static void nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	if (caps_feat)
		sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);
}

/*
 * Get NX capabilities from the hypervisor.
 * Currently only NX-GZIP capabilities are provided by the hypervisor and
 * these values are exported to user space through sysfs
 * (the nx_gzip_caps group above).
 */
static void __init nxcop_get_capabilities(void)
{
	struct hv_vas_all_caps *hv_caps;
	struct hv_nx_cop_caps *hv_nxc = NULL;
	int rc;

	hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
	if (!hv_caps)
		return;

	/*
	 * Get the overall NX capabilities with feature type 0.
	 */
	rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
					  (u64)virt_to_phys(hv_caps));
	if (rc)
		goto out;

	caps_feat = be64_to_cpu(hv_caps->feat_type);

	/*
	 * NX-GZIP feature available
	 */
	if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
		hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
		if (!hv_nxc)
			goto out;

		/*
		 * Get the capabilities for the NX-GZIP feature.
		 */
		rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
						  VAS_NX_GZIP_FEAT,
						  (u64)virt_to_phys(hv_nxc));
	} else {
		pr_err("NX-GZIP feature is not available\n");
		rc = -EINVAL;
	}

	if (!rc) {
		nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
		nx_cop_caps.req_max_processed_len =
				be64_to_cpu(hv_nxc->req_max_processed_len);
		nx_cop_caps.min_compress_len =
				be64_to_cpu(hv_nxc->min_compress_len);
		nx_cop_caps.min_decompress_len =
				be64_to_cpu(hv_nxc->min_decompress_len);
	} else {
		caps_feat = 0;
	}

	kfree(hv_nxc);
out:
	kfree(hv_caps);
}

static const struct vio_device_id nx842_vio_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};
MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);

static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};

static int __init nx842_pseries_init(void)
{
	struct nx842_devdata *new_devdata;
	int ret;

	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
		return -ENODEV;

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata)
		return -ENOMEM;

	RCU_INIT_POINTER(devdata, new_devdata);

	/*
	 * Get NX capabilities from the hypervisor on the pseries platform.
	 */
	nxcop_get_capabilities();

	ret = vio_register_driver(&nx842_vio_driver);
	if (ret) {
		pr_err("Could not register VIO driver %d\n", ret);

		kfree(new_devdata);
		return ret;
	}

	ret = vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
				       "nx-gzip");

	if (ret)
		pr_err("NX-GZIP is not supported. Returned=%d\n", ret);

	return 0;
}

module_init(nx842_pseries_init);

static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	vas_unregister_api_pseries();

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_vio_driver);
}

module_exit(nx842_pseries_exit);