/*
 * IBM Accelerator Family 'GenWQE'
 *
 * Miscellaneous functionality used in the other GenWQE driver parts.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/pgtable.h>

#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"
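
/**
 * __genwqe_writeq() - Write 64-bit register
 * @cd:        genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val:       64-bit value
 *
 * Return: 0 if success; < 0 if error
 */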
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
	return 0;
}
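
/**
 * __genwqe_readq() - Read 64-bit register
 * @cd:        genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: value from register
 */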
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffffffffffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x000000000000ffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x00000000ffff0000ull;

	if (cd->mmio == NULL)
		return 0xffffffffffffffffull;

	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}
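
/**
 * __genwqe_writel() - Write 32-bit register
 * @cd:        genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val:       32-bit value
 *
 * Return: 0 if success; < 0 if error
 */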
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
	return 0;
}
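
/**
 * __genwqe_readl() - Read 32-bit register
 * @cd:        genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: value from register
 */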
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffff;

	if (cd->mmio == NULL)
		return 0xffffffff;

	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}
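
/**
 * genwqe_read_app_id() - Extract app_id
 * @cd:       genwqe device descriptor
 * @app_name: carrier used to fetch the 4-byte app id
 * @len:      length of @app_name
 *
 * cd->app_unitcfg must be filled with valid data first. Blanks are
 * skipped and non-printable characters are replaced by 'X'.
 *
 * Return: number of characters copied into @app_name
 */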
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
	int i, j;
	u32 app_id = (u32)cd->app_unitcfg;

	memset(app_name, 0, len);
	for (i = 0, j = 0; j < min(len, 4); j++) {
		char ch = (char)((app_id >> (24 - j*8)) & 0xff);

		if (ch == ' ')
			continue;
		app_name[i++] = isprint(ch) ? ch : 'X';
	}
	return i;
}
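
/**
 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
 *
 * The hardware uses a polynomial which seems to differ from the ones
 * implemented by the existing kernel CRC helpers, therefore a private
 * lookup table is built here.
 *
 * Genwqe's polynomial: 0x20044009
 */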
#define CRC32_POLYNOMIAL	0x20044009
static u32 crc32_tab[256];

void genwqe_init_crc32(void)
{
	int i, j;
	u32 crc;

	for (i = 0; i < 256; i++) {
		crc = i << 24;
		for (j = 0; j < 8; j++) {
			if (crc & 0x80000000)
				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
			else
				crc = (crc << 1);
		}
		crc32_tab[i] = crc;
	}
}
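
/**
 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
 * @buff: pointer to data buffer
 * @len:  length of data for calculation
 * @init: initial crc (0xffffffff at start)
 *
 * Polynomial: x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
 *
 * Return: crc32 checksum
 */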
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
	int i;
	u32 crc;

	crc = init;
	while (len--) {
		i = ((crc >> 24) ^ *buff++) & 0xFF;
		crc = (crc << 8) ^ crc32_tab[i];
	}
	return crc;
}
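
/*
 * DMA-coherent memory allocation/free wrappers. Requests larger than
 * the page allocator can satisfy (get_order(size) > MAX_ORDER) are
 * rejected up front.
 */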
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle)
{
	if (get_order(size) > MAX_ORDER)
		return NULL;

	return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
}

void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
	if (vaddr == NULL)
		return;

	pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle);
}
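
/**
 * genwqe_unmap_pages() - Unmap DMA-mapped pages and clear the dma_list
 * @cd:        genwqe device descriptor
 * @dma_list:  DMA addresses to unmap; the walk stops at the first 0 entry
 * @num_pages: number of pages
 */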
static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
			       int num_pages)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
		pci_unmap_page(pci_dev, dma_list[i],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;
	}
}
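
/**
 * genwqe_map_pages() - Map a list of pinned pages for DMA
 * @cd:        genwqe device descriptor
 * @page_list: pinned pages to be mapped
 * @num_pages: number of pages
 * @dma_list:  resulting list of DMA addresses
 *
 * On failure, all pages mapped so far are unmapped again.
 *
 * Return: 0 on success, -EIO if a mapping failed
 */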
static int genwqe_map_pages(struct genwqe_dev *cd,
			    struct page **page_list, int num_pages,
			    dma_addr_t *dma_list)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* establish DMA mapping for requested pages */
	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr;

		dma_list[i] = 0x0;
		daddr = pci_map_page(pci_dev, page_list[i],
				     0,	/* map fully aligned pages */
				     PAGE_SIZE,
				     PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(pci_dev, daddr)) {
			dev_err(&pci_dev->dev,
				"[%s] err: no dma addr daddr=%016llx!\n",
				__func__, (long long)daddr);
			goto err;
		}

		dma_list[i] = daddr;
	}
	return 0;

 err:
	genwqe_unmap_pages(cd, dma_list, num_pages);
	return -EIO;
}
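
/*
 * Each block of 8 sg_entries carries up to 7 data entries plus one
 * chaining entry to the next block (see genwqe_setup_sgl()), hence the
 * extra num_pages/7 entries plus one terminating entry.
 */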
static int genwqe_sgl_size(int num_pages)
{
	int len, num_tlb = num_pages / 7;

	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
	return roundup(len, PAGE_SIZE);
}
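
/**
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 * @cd:        genwqe device descriptor
 * @sgl:       scatter-gather list description to be filled in
 * @user_addr: user virtual address
 * @user_size: size of user buffer
 *
 * Pages which might overlap other user-space memory blocks (an
 * incomplete first page and an incomplete last page) are buffered in
 * separately allocated DMA pages, such that we do not run into
 * synchronization issues. Data is copied from user-space into the
 * buffered pages.
 */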
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size)
{
	int rc = -ENOMEM;
	struct pci_dev *pci_dev = cd->pci_dev;

	sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
	sgl->fpage_size = min_t(size_t, PAGE_SIZE - sgl->fpage_offs, user_size);
	sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
	sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

	dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
		__func__, user_addr, user_size, sgl->nr_pages,
		sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

	sgl->user_addr = user_addr;
	sgl->user_size = user_size;
	sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

	if (get_order(sgl->sgl_size) > MAX_ORDER) {
		dev_err(&pci_dev->dev,
			"[%s] err: too much memory requested!\n", __func__);
		return -ENOMEM;
	}

	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
					     &sgl->sgl_dma_addr);
	if (sgl->sgl == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: no memory available!\n", __func__);
		return -ENOMEM;
	}

	/* Only use buffering on incomplete pages */
	if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
		sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->fpage_dma_addr);
		if (sgl->fpage == NULL)
			goto err_out;

		/* Sync with user memory */
		if (copy_from_user(sgl->fpage + sgl->fpage_offs,
				   user_addr, sgl->fpage_size)) {
			rc = -EFAULT;
			goto err_out;
		}
	}
	if (sgl->lpage_size != 0) {
		sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->lpage_dma_addr);
		if (sgl->lpage == NULL)
			goto err_out1;

		/* Sync with user memory */
		if (copy_from_user(sgl->lpage, user_addr + user_size -
				   sgl->lpage_size, sgl->lpage_size)) {
			rc = -EFAULT;
			goto err_out1;
		}
	}
	return 0;

 err_out1:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
				 sgl->fpage_dma_addr);
 err_out:
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);
	return rc;
}
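
/**
 * genwqe_setup_sgl() - Fill in the hardware scatter-gather list
 * @cd:       genwqe device descriptor
 * @sgl:      scatter-gather list description as set up by
 *            genwqe_alloc_sync_sgl()
 * @dma_list: list of DMA addresses for the pinned user pages
 *
 * The list is built in blocks of 8 entries, each holding one chaining
 * entry to the next block plus up to 7 data entries. Physically
 * contiguous pages are merged into a single entry. The last block is
 * shifted to drop the unused chaining entry and is terminated by an
 * SG_END_LIST entry.
 */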
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list)
{
	int i = 0, j = 0, p;
	unsigned long dma_offs, map_offs;
	dma_addr_t prev_daddr = 0;
	struct sg_entry *s, *last_s = NULL;
	size_t size = sgl->user_size;

	dma_offs = 128;		/* DMA offset of next block */
	map_offs = sgl->fpage_offs; /* offset in first page */

	s = &sgl->sgl[0];	/* first set of 8 entries */
	p = 0;			/* page */
	while (p < sgl->nr_pages) {
		dma_addr_t daddr;
		unsigned int size_to_map;

		/* always write the chaining entry, cleanup is done later */
		j = 0;
		s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
		s[j].len = cpu_to_be32(128);
		s[j].flags = cpu_to_be32(SG_CHAINED);
		j++;

		while (j < 8) {
			/* DMA mapping for requested page, offs, size */
			size_to_map = min(size, PAGE_SIZE - map_offs);

			if ((p == 0) && (sgl->fpage != NULL)) {
				daddr = sgl->fpage_dma_addr + map_offs;

			} else if ((p == sgl->nr_pages - 1) &&
				   (sgl->lpage != NULL)) {
				daddr = sgl->lpage_dma_addr;
			} else {
				daddr = dma_list[p] + map_offs;
			}

			size -= size_to_map;
			map_offs = 0;

			if (prev_daddr == daddr) {
				u32 prev_len = be32_to_cpu(last_s->len);

				/*
				 * Physically contiguous with the previous
				 * entry: extend it instead of starting a
				 * new one.
				 */
				last_s->len = cpu_to_be32(prev_len +
							  size_to_map);

				p++; /* process next page */
				if (p == sgl->nr_pages)
					goto fixup;  /* nothing to do */

				prev_daddr = daddr + size_to_map;
				continue;
			}

			/* start new entry */
			s[j].target_addr = cpu_to_be64(daddr);
			s[j].len = cpu_to_be32(size_to_map);
			s[j].flags = cpu_to_be32(SG_DATA);
			prev_daddr = daddr + size_to_map;
			last_s = &s[j];
			j++;

			p++; /* process next page */
			if (p == sgl->nr_pages)
				goto fixup;  /* nothing to do */
		}
		dma_offs += 128;
		s += 8;		/* continue 8 elements further */
	}
 fixup:
	if (j == 1) {		/* combining happened on last entry! */
		s -= 8;		/* full shift needed on previous sgl block */
		j = 7;		/* shift all elements */
	}

	for (i = 0; i < j; i++)	/* move elements 1 up */
		s[i] = s[i + 1];

	s[i].target_addr = cpu_to_be64(0);
	s[i].len = cpu_to_be32(0);
	s[i].flags = cpu_to_be32(SG_END_LIST);
	return 0;
}
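
/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 * @cd:  genwqe device descriptor
 * @sgl: scatter-gather list description
 *
 * After the DMA transfer has been completed we free the memory for the
 * sgl and the buffered pages. Data from the buffered pages is copied
 * back to user-space.
 *
 * Return: 0 on success, -EFAULT if copying back to user-space failed
 */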
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
	int rc = 0;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (sgl->fpage) {
		if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
				 sgl->fpage_size)) {
			dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
				__func__);
			rc = -EFAULT;
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
					 sgl->fpage_dma_addr);
		sgl->fpage = NULL;
		sgl->fpage_dma_addr = 0;
	}
	if (sgl->lpage) {
		if (copy_to_user(sgl->user_addr + sgl->user_size -
				 sgl->lpage_size, sgl->lpage,
				 sgl->lpage_size)) {
			dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
				__func__);
			rc = -EFAULT;
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
					 sgl->lpage_dma_addr);
		sgl->lpage = NULL;
		sgl->lpage_dma_addr = 0;
	}
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);

	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0x0;
	sgl->sgl_size = 0;
	return rc;
}
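
/**
 * free_user_pages() - Give pinned pages back
 * @page_list: list of pages from get_user_pages_fast()
 * @nr_pages:  number of pages
 * @dirty:     mark the pages dirty before releasing them
 *
 * Pages which may have been written to by the hardware must be marked
 * dirty (set_page_dirty_lock()) before put_page() is called.
 */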
static int free_user_pages(struct page **page_list, unsigned int nr_pages,
			   int dirty)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (page_list[i] != NULL) {
			if (dirty)
				set_page_dirty_lock(page_list[i]);
			put_page(page_list[i]);
		}
	}
	return 0;
}
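
/**
 * genwqe_user_vmap() - Map user-space memory for DMA
 * @cd:    genwqe device descriptor
 * @m:     mapping parameters
 * @uaddr: user virtual address
 * @size:  size of memory to be mapped
 * @req:   DDCB request, passed along by the caller
 *
 * Pins the user pages, allocates a combined page_list/dma_list and
 * establishes the DMA mappings.
 *
 * Return: 0 if success
 */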
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
		     unsigned long size, struct ddcb_requ *req)
{
	int rc = -EINVAL;
	unsigned long data, offs;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((uaddr == NULL) || (size == 0)) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->u_vaddr = uaddr;
	m->size = size;

	/* determine space needed for page_list */
	data = (unsigned long)uaddr;
	offs = offset_in_page(data);
	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);

	m->page_list = kcalloc(m->nr_pages,
			       sizeof(struct page *) + sizeof(dma_addr_t),
			       GFP_KERNEL);
	if (!m->page_list) {
		dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
		m->nr_pages = 0;
		m->u_vaddr = NULL;
		m->size = 0;	/* mark unused and not added */
		return -ENOMEM;
	}
	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);

	/* pin user pages in memory */
	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
				 m->nr_pages,
				 1,		/* write by caller */
				 m->page_list);	/* ptrs to pages */
	if (rc < 0)
		goto fail_get_user_pages;

	/* assumption: get_user_pages can be killed by signals. */
	if (rc < m->nr_pages) {
		free_user_pages(m->page_list, rc, 0);
		rc = -EFAULT;
		goto fail_get_user_pages;
	}

	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
	if (rc != 0)
		goto fail_free_user_pages;

	return 0;

 fail_free_user_pages:
	free_user_pages(m->page_list, m->nr_pages, 0);

 fail_get_user_pages:
	kfree(m->page_list);
	m->page_list = NULL;
	m->dma_list = NULL;
	m->nr_pages = 0;
	m->u_vaddr = NULL;
	m->size = 0;	/* mark unused and not added */
	return rc;
}
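
/**
 * genwqe_user_vunmap() - Undo mapping of user-space memory
 * @cd:  genwqe device descriptor
 * @m:   mapping parameters
 * @req: DDCB request (see genwqe_user_vmap())
 *
 * Unmaps the DMA addresses, marks the pages dirty and unpins them.
 */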
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
		       struct ddcb_requ *req)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!dma_mapping_used(m)) {
		dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
			__func__, m);
		return -EINVAL;
	}

	if (m->dma_list)
		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);

	if (m->page_list) {
		free_user_pages(m->page_list, m->nr_pages, 1);

		kfree(m->page_list);
		m->page_list = NULL;
		m->dma_list = NULL;
		m->nr_pages = 0;
	}

	m->u_vaddr = NULL;
	m->size = 0;
	return 0;
}
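
/**
 * genwqe_card_type() - Get chip type from the SLU Configuration Register
 * @cd: genwqe device descriptor
 *
 * Return: card type as encoded in IO_SLU_UNITCFG_TYPE_MASK
 */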
u8 genwqe_card_type(struct genwqe_dev *cd)
{
	u64 card_type = cd->slu_unitcfg;

	return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}
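
/**
 * genwqe_card_reset() - Reset the card
 * @cd: genwqe device descriptor
 *
 * Only the privileged (PF) instance is allowed to reset the card.
 */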
int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	/* trigger the soft reset and clear the FIRs */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/*
	 * Read-modify-write to preserve the stealth bits (mask 0x3c),
	 * then trigger the error reset (bit 1).
	 */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	/* give the error reset some time to finish */
	msleep(50);

	if (genwqe_need_err_masking(cd)) {
		dev_info(&pci_dev->dev,
			 "[%s] masking errors for old bitstreams\n", __func__);
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
	}
	return 0;
}
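
/**
 * genwqe_read_softreset() - Determine the softreset value for the
 * currently active bitstream
 * @cd: genwqe device descriptor
 */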
int genwqe_read_softreset(struct genwqe_dev *cd)
{
	u64 bitstream;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
	cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
	return 0;
}
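
/**
 * genwqe_set_interrupt_capability() - Configure MSI capability structure
 * @cd:    genwqe device descriptor
 * @count: number of vectors to allocate
 *
 * Return: 0 if no error
 */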
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	rc = pci_enable_msi_range(pci_dev, 1, count);
	if (rc < 0)
		return rc;

	cd->flags |= GENWQE_FLAG_MSI_ENABLED;
	return 0;
}
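
/**
 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
 * @cd: genwqe device descriptor
 */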
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
		pci_disable_msi(pci_dev);
		cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
	}
}
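
/*
 * set_reg_idx()/set_reg() - Helpers to fill the FFDC register dump.
 * The WARN_ON_ONCE guards against overrunning the caller-provided
 * register array.
 */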
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
		       unsigned int *i, unsigned int m, u32 addr, u32 idx,
		       u64 val)
{
	if (WARN_ON_ONCE(*i >= m))
		return -EFAULT;

	r[*i].addr = addr;
	r[*i].idx = idx;
	r[*i].val = val;
	++*i;
	return 0;
}

static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
		   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
	return set_reg_idx(cd, r, i, m, addr, 0, val);
}
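
/**
 * genwqe_read_ffdc_regs() - Read the first-failure data capture registers
 * @cd:       genwqe device descriptor
 * @regs:     register array to be filled
 * @max_regs: size of the register array
 * @all:      if set, dump the secondary FIR/FEC registers unconditionally,
 *            not just for bits set in the unit FIR
 *
 * Return: number of registers read
 */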
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all)
{
	unsigned int i, j, idx = 0;
	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

	/* Global FIR */
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

	/* UnitCfg for SLU */
	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG);
	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

	/* UnitCfg for APP */
	appid = __genwqe_readq(cd, IO_APP_UNITCFG);
	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

	/* Check all chip units */
	for (i = 0; i < GENWQE_MAX_UNITS; i++) {

		/* Unit FIR */
		ufir_addr = (i << 24) | 0x008;
		ufir = __genwqe_readq(cd, ufir_addr);
		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

		/* Unit FEC */
		ufec_addr = (i << 24) | 0x018;
		ufec = __genwqe_readq(cd, ufec_addr);
		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

		for (j = 0; j < 64; j++) {
			/* wherever there is a primary 1, read the 2ndary */
			if (!all && (!(ufir & (1ull << j))))
				continue;

			sfir_addr = (i << 24) | (0x100 + 8 * j);
			sfir = __genwqe_readq(cd, sfir_addr);
			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

			sfec_addr = (i << 24) | (0x300 + 8 * j);
			sfec = __genwqe_readq(cd, sfec_addr);
			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
		}
	}

	/* fill with invalid data until end */
	for (i = idx; i < max_regs; i++) {
		regs[i].addr = 0xffffffff;
		regs[i].val = 0xffffffffffffffffull;
	}
	return idx;
}
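
/**
 * genwqe_ffdc_buff_size() - Calculate the number of dump registers
 * @cd:  genwqe device descriptor
 * @uid: unit ID
 *
 * Walks the extended error entries and the diagnostic rings to figure
 * out how many entries genwqe_ffdc_buff_read() will produce.
 */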
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
	int entries = 0, ring, traps, traces, trace_entries;
	u32 eevptr_addr, l_addr, d_len, d_type;
	u64 eevptr, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != -1ull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;

		while (1) {
			val = __genwqe_readq(cd, l_addr);

			if ((val == 0x0) || (val == -1ull))
				break;

			/* bits 38:24 */
			d_len = (val & 0x0000007fff000000ull) >> 24;

			/* bit 39 */
			d_type = (val & 0x0000008000000000ull) >> 36;

			if (d_type) {	/* repeat count */
				entries += d_len;
			} else {	/* size in bytes! */
				entries += d_len >> 3;
			}

			l_addr += 8;
		}
	}

	for (ring = 0; ring < 8; ring++) {
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;
		traces = (val >> 16) & 0xff;
		trace_entries = val & 0xffff;

		entries += traps + (traces * trace_entries);
	}
	return entries;
}
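
/**
 * genwqe_ffdc_buff_read() - Read the extended error and trace entries
 * @cd:       genwqe device descriptor
 * @uid:      unit ID
 * @regs:     register array to be filled
 * @max_regs: size of the register array
 */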
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
			  struct genwqe_reg *regs, unsigned int max_regs)
{
	int i, traps, traces, trace, trace_entries, trace_entry, ring;
	unsigned int idx = 0;
	u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
	u64 eevptr, e, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;
		while (1) {
			e = __genwqe_readq(cd, l_addr);
			if ((e == 0x0) || (e == 0xffffffffffffffffull))
				break;

			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
			d_addr |= GENWQE_UID_OFFS(uid);

			if (d_type) {
				/* repeated read of the same address */
				for (i = 0; i < (int)d_len; i++) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, i, val);
				}
			} else {
				d_len >>= 3; /* size in bytes! */
				for (i = 0; i < (int)d_len; i++, d_addr += 8) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, 0, val);
				}
			}
			l_addr += 8;
		}
	}

	/*
	 * Walk the extended diagnostic rings: each provides a number of
	 * traps and traces, with trace_entries entries per trace.
	 */
	for (ring = 0; ring < 8; ring++) {

		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;	/* number of traps */
		traces = (val >> 16) & 0xff;	/* number of traces */
		trace_entries = val & 0xffff;	/* entries per trace */

		/* trace == 0 dumps the traps; trace >= 1 dumps the traces */
		for (trace = 0; trace <= traces; trace++) {
			u32 diag_sel =
				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);

			addr = (GENWQE_UID_OFFS(uid) |
				IO_EXTENDED_DIAG_SELECTOR);
			__genwqe_writeq(cd, addr, diag_sel);

			for (trace_entry = 0;
			     trace_entry < (trace ? trace_entries : traps);
			     trace_entry++) {
				addr = (GENWQE_UID_OFFS(uid) |
					IO_EXTENDED_DIAG_READ_MBX);
				val = __genwqe_readq(cd, addr);
				set_reg_idx(cd, regs, &idx, max_regs, addr,
					    (diag_sel << 16) | trace_entry,
					    val);
			}
		}
	}
	return 0;
}
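
/**
 * genwqe_write_vreg() - Write register in virtual window
 * @cd:   genwqe device descriptor
 * @reg:  register offset
 * @val:  value to write
 * @func: PCI virtual function selected via the VF-window
 */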
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	__genwqe_writeq(cd, reg, val);
	return 0;
}
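
/**
 * genwqe_read_vreg() - Read register in virtual window
 * @cd:   genwqe device descriptor
 * @reg:  register offset
 * @func: PCI virtual function selected via the VF-window
 */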
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	return __genwqe_readq(cd, reg);
}
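
/**
 * genwqe_base_clock_frequency() - Determine the card's base clock frequency
 * @cd: genwqe device descriptor
 *
 * The speed grade is encoded in bits 31:28 of IO_SLU_UNITCFG.
 *
 * Return: frequency in MHz, or 0 for an unknown speed grade
 */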
int genwqe_base_clock_frequency(struct genwqe_dev *cd)
{
	u16 speed;
	static const int speed_grade[] = { 250, 200, 166, 175 };

	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
	if (speed >= ARRAY_SIZE(speed_grade))
		return 0;	/* illegal value */

	return speed_grade[speed];
}
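
/**
 * genwqe_stop_traps() - Stop traps
 * @cd: genwqe device descriptor
 *
 * Before reading out the analysis data, the traps should be stopped.
 */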
void genwqe_stop_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
}
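
/**
 * genwqe_start_traps() - Start traps
 * @cd: genwqe device descriptor
 *
 * After the analysis data has been collected, the traps are restarted.
 * For old bitstreams the error mask is applied again (see
 * genwqe_card_reset()).
 */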
void genwqe_start_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);

	if (genwqe_need_err_masking(cd))
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}