#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include <linux/module.h>

#include "qib.h"
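
/*
 * This file contains PCIe utility routines common to the various
 * QLogic InfiniPath adapters.
 */
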
static void qib_tune_pcie_caps(struct qib_devdata *);
static void qib_tune_pcie_coalesce(struct qib_devdata *);
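
/*
 * Do all the common PCIe setup and initialization.
 * devdata is not yet allocated, and is not allocated until after this
 * routine returns success, so qib_dev_err() can't be used for error
 * printing; qib_early_err() is used instead.
 */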
int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
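		/*
		 * This can happen (in theory) if the chip was reset and
		 * the BAR was not restored, or the chip reset due to an
		 * internal error and the driver was then unloaded and
		 * reloaded; either way the device may need to be
		 * re-enumerated before it can be enabled again.
		 */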
		qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
			      -ret);
		goto done;
	}

	ret = pci_request_regions(pdev, QIB_DRV_NAME);
	if (ret) {
		qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
		goto bail;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
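		/*
		 * If the 64-bit setup fails, try 32-bit.  Some systems
		 * do not set up 64-bit maps on systems with 2GB or less
		 * of installed memory.
		 */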
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
			goto bail;
		}
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		qib_early_err(&pdev->dev,
			      "Unable to set DMA consistent mask: %d\n", ret);
		goto bail;
	}

	pci_set_master(pdev);
	ret = pci_enable_pcie_error_reporting(pdev);
	if (ret) {
		qib_early_err(&pdev->dev,
			      "Unable to enable pcie error reporting: %d\n",
			      ret);
		ret = 0;	/* non-fatal; carry on without AER */
	}
	goto done;

bail:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
done:
	return ret;
}
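
/*
 * Do remaining PCIe setup once dd is allocated, and save away fields
 * required to re-initialize after a chip reset, or for various other
 * purposes.
 */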
int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
		    const struct pci_device_id *ent)
{
	unsigned long len;
	resource_size_t addr;

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
#else
	dd->kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->kregbase)
		return -ENOMEM;

	dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
	dd->physaddr = addr;	/* used for io_remap, etc. */
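
	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */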
	dd->pcibar0 = addr;
	dd->pcibar1 = addr >> 32;
	dd->deviceid = ent->device;
	dd->vendorid = ent->vendor;

	return 0;
}
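
/*
 * Do PCIe cleanup, after chip-specific cleanup, etc.  Called just
 * prior to releasing the dd memory.
 */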
void qib_pcie_ddcleanup(struct qib_devdata *dd)
{
	u64 __iomem *base = (void __iomem *) dd->kregbase;

	dd->kregbase = NULL;
	iounmap(base);
	if (dd->piobase)
		iounmap(dd->piobase);
	if (dd->userbase)
		iounmap(dd->userbase);
	if (dd->piovl15base)
		iounmap(dd->piovl15base);

	pci_disable_device(dd->pcidev);
	pci_release_regions(dd->pcidev);

	pci_set_drvdata(dd->pcidev, NULL);
}

static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
			   struct qib_msix_entry *qib_msix_entry)
{
	int ret;
	int nvec = *msixcnt;
	struct msix_entry *msix_entry;
	int i;

	ret = pci_msix_vec_count(dd->pcidev);
	if (ret < 0)
		goto do_intx;

	nvec = min(nvec, ret);
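
	/*
	 * We can't pass the qib_msix_entry array to
	 * pci_enable_msix_range(), so use a scratch msix_entry array and
	 * copy the allocated vectors back into qib_msix_entry afterwards.
	 */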
	msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
	if (!msix_entry)
		goto do_intx;

	for (i = 0; i < nvec; i++)
		msix_entry[i] = qib_msix_entry[i].msix;

	ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
	if (ret < 0)
		goto free_msix_entry;
	else
		nvec = ret;

	for (i = 0; i < nvec; i++)
		qib_msix_entry[i].msix = msix_entry[i];

	kfree(msix_entry);
	*msixcnt = nvec;
	return;

free_msix_entry:
	kfree(msix_entry);

do_intx:
	qib_dev_err(
		dd,
		"pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
		nvec, ret);
	*msixcnt = 0;
	qib_enable_intx(dd->pcidev);
}
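
/*
 * We save the MSI lo and hi values, so we can restore them after a
 * chip reset (the kernel PCI infrastructure doesn't yet handle that
 * correctly).
 */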
static int qib_msi_setup(struct qib_devdata *dd, int pos)
{
	struct pci_dev *pdev = dd->pcidev;
	u16 control;
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		qib_dev_err(dd,
			"pci_enable_msi failed: %d, interrupts may not work\n",
			ret);
	/* continue even if it fails, we may still be OK... */

	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
			      &dd->msi_lo);
	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
			      &dd->msi_hi);
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
	/*
	 * Now save the data (vector) info; the message-data register sits
	 * at offset 12 of the capability for 64-bit address MSI, at
	 * offset 8 otherwise.
	 */
	pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
				    ? 12 : 8),
			     &dd->msi_data);
	return ret;
}

int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
		    struct qib_msix_entry *entry)
{
	u16 linkstat, speed;
	int pos = 0, ret = 1;

	if (!pci_is_pcie(dd->pcidev)) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something reasonable to report */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		goto bail;
	}

	pos = dd->pcidev->msix_cap;
	if (nent && *nent && pos) {
		qib_msix_setup(dd, pos, nent, entry);
		ret = 0; /* did it, either MSIx or INTx */
	} else {
		pos = dd->pcidev->msi_cap;
		if (pos)
			ret = qib_msi_setup(dd, pos);
		else
			qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
	}
	if (!pos)
		qib_enable_intx(dd->pcidev);

	pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	/*
	 * Speed is bits 0-3, link width is bits 4-8; there are no
	 * defines for them in the headers.
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;
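
	/*
	 * Worked example (values illustrative): a LNKSTA of 0x0041
	 * decodes as speed code 1 (2.5 GT/s, Gen1) in bits 0-3 and a
	 * negotiated link width of x4 in bits 4-8.
	 */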

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5 GT/s */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5 GT/s */
		break;
	default: /* not defined, assume Gen1 */
		dd->lbus_speed = 2500;
		break;
	}
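
	/*
	 * Check against the expected PCIe width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */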
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);

	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}
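
/*
 * Set up PCIe interrupt state again after a reset.  Rather than
 * calling pci_enable_msi() again (which doesn't reliably restore the
 * MSI enable bit and the saved vector after a chip reset), restore
 * the saved MSI address and data registers by hand.
 */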
int qib_reinit_intr(struct qib_devdata *dd)
{
	int pos;
	u16 control;
	int ret = 0;

	/* If we aren't using MSI, don't restore it */
	if (!dd->msi_lo)
		goto bail;

	pos = dd->pcidev->msi_cap;
	if (!pos) {
		qib_dev_err(dd,
			"Can't find MSI capability, can't restore MSI settings\n");
		ret = 0;
		/* nothing special for MSIx, just MSI */
		goto bail;
	}
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
			       dd->msi_lo);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
			       dd->msi_hi);
	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
		control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				      control);
	}
	/* now rewrite the data (vector) info */
	pci_write_config_word(dd->pcidev, pos +
			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			      dd->msi_data);
	ret = 1;
bail:
	if (!ret && (dd->flags & QIB_HAS_INTX)) {
		qib_enable_intx(dd->pcidev);
		ret = 1;
	}

	/* and now set the pci master bit again */
	pci_set_master(dd->pcidev);

	return ret;
}
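
/*
 * Disable the MSI interrupt if enabled, and clear msi_lo.
 * This is used primarily for the fallback to INTx, but is also
 * used in reinit after reset, and during cleanup.
 */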
void qib_nomsi(struct qib_devdata *dd)
{
	dd->msi_lo = 0;
	pci_disable_msi(dd->pcidev);
}
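
/*
 * Same as qib_nomsi, but for MSIx.
 */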
void qib_nomsix(struct qib_devdata *dd)
{
	pci_disable_msix(dd->pcidev);
}
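
/*
 * Similar to pci_intx(pdev, 1), except that we make sure
 * MSI and MSIx are off first.
 */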
void qib_enable_intx(struct pci_dev *pdev)
{
	u16 cw, new;
	int pos;

	/* first, turn on INTx */
	pci_read_config_word(pdev, PCI_COMMAND, &cw);
	new = cw & ~PCI_COMMAND_INTX_DISABLE;
	if (new != cw)
		pci_write_config_word(pdev, PCI_COMMAND, new);

	pos = pdev->msi_cap;
	if (pos) {
		/* then turn off MSI */
		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
		new = cw & ~PCI_MSI_FLAGS_ENABLE;
		if (new != cw)
			pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
	}
	pos = pdev->msix_cap;
	if (pos) {
		/* then turn off MSIx */
		pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
		new = cw & ~PCI_MSIX_FLAGS_ENABLE;
		if (new != cw)
			pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
	}
}
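
/*
 * These two routines are helper routines for the device reset code,
 * to move all the PCIe handling out of the chip-specific driver code.
 */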
void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
{
	pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
	pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
	pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
}

void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
{
	int r;

	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
				   dd->pcibar0);
	if (r)
		qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
				   dd->pcibar1);
	if (r)
		qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);

	pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
	pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
	r = pci_enable_device(dd->pcidev);
	if (r)
		qib_dev_err(dd,
			"pci_enable_device failed after reset: %d\n", r);
}

static int qib_pcie_coalesce;
module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
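/*
 * Example (hypothetical usage): opt in to the chipset tuning below at
 * load time with "modprobe ib_qib pcie_coalesce=1"; the parameter is
 * read-only at runtime (S_IRUGO).
 */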
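
/*
 * Enable PCIe completion coalescing on some Intel 5000-series and 7300
 * chipsets.  Off by default; opt in via the pcie_coalesce module
 * parameter above.
 */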
static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
	int r;
	struct pci_dev *parent;
	u16 devid;
	u32 mask, bits, val;

	if (!qib_pcie_coalesce)
		return;

	/* the parent must be a root port on an Intel chipset */
	parent = dd->pcidev->bus->self;
	if (parent->bus->parent) {
		qib_devinfo(dd->pcidev, "Parent not root\n");
		return;
	}
	if (!pci_is_pcie(parent))
		return;
	if (parent->vendor != 0x8086)
		return;
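
	/*
	 * The chipset register at config offset 0x48 controls coalescing;
	 * the bit names below are taken from the chipset documentation
	 * (the mask/bits values used further down are authoritative):
	 *  - bit 10: COALESCE_EN, set to 1
	 *  - bit 11: COALESCE_FORCE, set to 0
	 *  - bit 12: Max_rdcmp_Imt_EN, set to 1 where safe
	 * On the 5000, 5100, and 7300 chipsets, bits 25:24
	 * (COALESCE_MODE) are also cleared.
	 */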
	devid = parent->device;
	if (devid >= 0x25e2 && devid <= 0x25fa) {
		/* 5000 P/V/X/Z */
		if (parent->revision <= 0xb2)
			bits = 1U << 10;
		else
			bits = 7U << 10;
		mask = (3U << 24) | (7U << 10);
	} else if (devid >= 0x65e2 && devid <= 0x65fa) {
		/* 5100 */
		bits = 1U << 10;
		mask = (3U << 24) | (7U << 10);
	} else if (devid >= 0x4021 && devid <= 0x402e) {
		/* 5400 */
		bits = 7U << 10;
		mask = 7U << 10;
	} else if (devid >= 0x3604 && devid <= 0x360a) {
		/* 7300 */
		bits = 7U << 10;
		mask = (3U << 24) | (7U << 10);
	} else {
		/* not one of the chipsets that we know about */
		return;
	}
	pci_read_config_dword(parent, 0x48, &val);
	val &= ~mask;
	val |= bits;
	r = pci_write_config_dword(parent, 0x48, val);
}
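
/*
 * BIOS may not set PCIe bus-utilization parameters for best performance.
 * Check and optionally adjust them to maximize our throughput.
 */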
static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
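/*
 * The parameter packs two 3-bit codes, each encoding bytes = 128 << code.
 * Example (illustrative): pcie_caps=0x51 requests a max payload code of
 * 1 (bits 0..2, 256 bytes) and a max read request code of 5 (bits 4..6,
 * 4096 bytes).
 */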

static void qib_tune_pcie_caps(struct qib_devdata *dd)
{
	struct pci_dev *parent;
	u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
	u16 rc_mrrs, ep_mrrs, max_mrrs;

	/* Find out supported and configured values for parent (root) */
	parent = dd->pcidev->bus->self;
	if (!pci_is_root_bus(parent->bus)) {
		qib_devinfo(dd->pcidev, "Parent not root\n");
		return;
	}

	if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
		return;

	rc_mpss = parent->pcie_mpss;
	rc_mps = ffs(pcie_get_mps(parent)) - 8;
	/* Find out supported and configured values for endpoint (us) */
	ep_mpss = dd->pcidev->pcie_mpss;
	ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;

	/* Find max payload supported by root, endpoint */
	if (rc_mpss > ep_mpss)
		rc_mpss = ep_mpss;

	/* If supported is greater than the limit in the module param, limit it */
	if (rc_mpss > (qib_pcie_caps & 7))
		rc_mpss = qib_pcie_caps & 7;

	/* If less than (allowed, supported), bump root payload */
	if (rc_mpss > rc_mps) {
		rc_mps = rc_mpss;
		pcie_set_mps(parent, 128 << rc_mps);
	}
	/* If less than (allowed, supported), bump endpoint payload */
	if (rc_mpss > ep_mps) {
		ep_mps = rc_mpss;
		pcie_set_mps(dd->pcidev, 128 << ep_mps);
	}
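
	/*
	 * Note on the encoding above: pcie_get_mps() returns bytes, so
	 * ffs(256) - 8 = 9 - 8 = 1, and 128 << 1 = 256 inverts it; MPS
	 * codes 0..5 correspond to payloads of 128..4096 bytes.
	 */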

	/*
	 * Now the Read Request size.
	 * No field for max supported, but PCIe spec limits it to 4096,
	 * which is code '5' (log2(4096) - 7)
	 */
	max_mrrs = 5;
	if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
		max_mrrs = (qib_pcie_caps >> 4) & 7;

	max_mrrs = 128 << max_mrrs;
	rc_mrrs = pcie_get_readrq(parent);
	ep_mrrs = pcie_get_readrq(dd->pcidev);

	if (max_mrrs > rc_mrrs) {
		rc_mrrs = max_mrrs;
		pcie_set_readrq(parent, rc_mrrs);
	}
	if (max_mrrs > ep_mrrs) {
		ep_mrrs = max_mrrs;
		pcie_set_readrq(dd->pcidev, ep_mrrs);
	}
}
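
/* End of PCIe capability tuning */

/*
 * From here through the qib_pci_err_handler definition, the routines
 * are invoked via the PCI error recovery infrastructure, registered
 * through the driver's pci_error_handlers.
 */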
static pci_ers_result_t
qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	switch (state) {
	case pci_channel_io_normal:
		qib_devinfo(pdev, "State Normal, ignoring\n");
		break;

	case pci_channel_io_frozen:
		qib_devinfo(pdev, "State Frozen, requesting reset\n");
		pci_disable_device(pdev);
		ret = PCI_ERS_RESULT_NEED_RESET;
		break;

	case pci_channel_io_perm_failure:
		qib_devinfo(pdev, "State Permanent Failure, disabling\n");
		if (dd) {
			/* no more register accesses! */
			dd->flags &= ~QIB_PRESENT;
			qib_disable_after_error(dd);
		}
		/* else early, or other problem */
		ret = PCI_ERS_RESULT_DISCONNECT;
		break;

	default: /* shouldn't happen */
		qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
			    state);
		break;
	}
	return ret;
}

static pci_ers_result_t
qib_pci_mmio_enabled(struct pci_dev *pdev)
{
	u64 words = 0U;
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	if (dd && dd->pport) {
		words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
		/* a counter read of all-ones means MMIO reads are failing */
		if (words == ~0ULL)
			ret = PCI_ERS_RESULT_NEED_RESET;
	}
	qib_devinfo(pdev,
		"QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n",
		words, ret);
	return ret;
}

static pci_ers_result_t
qib_pci_slot_reset(struct pci_dev *pdev)
{
	qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t
qib_pci_link_reset(struct pci_dev *pdev)
{
	qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static void
qib_pci_resume(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);

	qib_devinfo(pdev, "QIB resume function called\n");
	pci_cleanup_aer_uncorrect_error_status(pdev);
	/*
	 * Running jobs will fail, since the reset is asynchronous,
	 * unlike a sysfs-requested reset.  Still better than doing
	 * nothing.
	 */
	qib_init(dd, 1); /* same as re-init after reset */
}

const struct pci_error_handlers qib_pci_err_handler = {
	.error_detected = qib_pci_error_detected,
	.mmio_enabled = qib_pci_mmio_enabled,
	.link_reset = qib_pci_link_reset,
	.slot_reset = qib_pci_slot_reset,
	.resume = qib_pci_resume,
};