/*
 * Platform dependent EEH operations for the pseries platform, implemented
 * on top of the EEH related RTAS calls (ibm,set-eeh-option,
 * ibm,read-slot-reset-state2, ibm,slot-error-detail, ibm,configure-pe
 * and friends).
 */
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

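/* RTAS tokens for the EEH related firmware calls, resolved in pseries_eeh_init() */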
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;

#ifdef CONFIG_PCI_IOV
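/*
 * pseries_pcibios_bus_add_device - EEH setup for a newly added SR-IOV VF
 * @pdev: PCI device that was just added to the bus
 *
 * Mirror the VF's IDs into its pci_dn, bind it to the PE number reserved
 * by its parent PF, and register it with the EEH core and sysfs.
 */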
void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pci_dn *physfn_pdn;
	struct eeh_dev *edev;

	if (!pdev->is_virtfn)
		return;

	pdn->device_id = pdev->device;
	pdn->vendor_id = pdev->vendor;
	pdn->class_code = pdev->class;

	/* Reset the return code of the last allow-unfreeze request */
	pdn->last_allow_rc = 0;
	physfn_pdn = pci_get_pdn(pdev->physfn);
	pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index];
	edev = pdn_to_eeh_dev(pdn);

	/*
	 * Hook the VF into the EEH infrastructure and re-attach it to its
	 * PE with the correct config address.
	 */
	eeh_add_device_early(pdn);
	eeh_add_device_late(pdev);
	edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
	eeh_rmv_from_parent_pe(edev);
	eeh_add_to_parent_pe(edev);
	eeh_sysfs_add_device(pdev);
}
#endif

/*
 * Buffer for retrieving the output of the ibm,slot-error-detail RTAS
 * call. Statically allocated so its physical address can be handed to
 * firmware.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

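/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * Look up the RTAS tokens used by the EEH operations, sanity-check that
 * the required calls exist, size the error log buffer and select the
 * device-tree based probe mode.
 */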
static int pseries_eeh_init(void)
{
	/* Figure out the RTAS tokens of the EEH related calls */
	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe = rtas_token("ibm,configure-pe");

	/*
	 * Fall back to the older ibm,configure-bridge call when
	 * ibm,configure-pe is not implemented by firmware.
	 */
	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
		ibm_configure_pe = rtas_token("ibm,configure-bridge");

	/*
	 * Sanity check: all of the calls used below must exist, with
	 * either variant of read-slot-reset-state being acceptable.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_info("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_info("%s: EEH error log size %d exceeds the maximum %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

#ifdef CONFIG_PCI_IOV
	/* Set EEH machine dependent code */
	ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;
#endif

	return 0;
}

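/*
 * pseries_eeh_cap_start - Start offset of the PCI capability list
 * @pdn: PCI device node
 *
 * Returns PCI_CAPABILITY_LIST if the device advertises a capability
 * list in its status register, otherwise 0.
 */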
static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}

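/*
 * pseries_eeh_find_cap - Find the requested PCI capability
 * @pdn: PCI device node
 * @cap: capability ID
 *
 * Walk the standard capability list through RTAS config accesses and
 * return the offset of the matching capability, or 0 if not found.
 */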
static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Bound the walk over the capability list */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

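/*
 * pseries_eeh_find_ecap - Find the requested PCIe extended capability
 * @pdn: PCI device node
 * @cap: extended capability ID
 *
 * Walk the PCIe extended capability list, which starts at offset 256,
 * and return the offset of the matching capability, or 0 if not found.
 */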
static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;
	int ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

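/**
 * pseries_eeh_probe - EEH probe on the given device
 * @pdn: PCI device node
 * @data: unused
 *
 * Probe the given device so that its capability offsets are cached,
 * EEH is enabled on it where possible, and it is attached to its
 * parent PE.
 */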
static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	/* Retrieve the EEH device; skip if it was probed already */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || edev->pe)
		return NULL;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return NULL;

	/* Skip PCI-ISA bridges */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/*
	 * Update the class code, capability offsets and mode of the EEH
	 * device so that bridges, root ports and downstream ports are
	 * identified correctly.
	 */
	edev->class_code = pdn->class_code;
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Initialize a temporary PE on the stack for the RTAS calls below */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = pdn->phb;
	pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	/* Enable EEH on the device */
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (!ret) {
		/* Retrieve PE address */
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/*
		 * Enabling EEH may report success even when it is not
		 * really supported, so double check by reading the PE
		 * state.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_add_flag(EEH_ENABLED);
			eeh_add_to_parent_pe(edev);

			pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
				 __func__, pdn->busno, PCI_SLOT(pdn->devfn),
				 PCI_FUNC(pdn->devfn), pe.phb->global_number,
				 pe.addr);
		} else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
			   (pdn_to_eeh_dev(pdn->parent))->pe) {
			/*
			 * The device doesn't support EEH itself, but its
			 * parent does: attach the device to the parent PE.
			 */
			edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}

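/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * Control EEH for the PE: enable or disable EEH, or re-enable MMIO or
 * DMA on a frozen PE, by way of the ibm,set-eeh-option RTAS call.
 */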
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;
	int config_addr;

	/*
	 * When enabling or disabling EEH on a PE, the PE address might
	 * not be available yet, so fall back to the regular config
	 * address in that case.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = pe->config_addr;
		if (pe->addr)
			config_addr = pe->addr;
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not supported on pseries */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n",
		       __func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

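/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the PE address associated with the PE config address. Two
 * RTAS calls exist for the purpose; the newer ibm,get-config-addr-info2
 * is preferred. A return value of zero means no valid PE address.
 */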
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First check that the device has a PE associated with
		 * it; otherwise the PE address is meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}

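/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: suggested wait time if the state is temporarily unavailable
 *
 * Retrieve the state of the PE through ibm,read-slot-reset-state2 if it
 * is implemented, falling back to ibm,read-slot-reset-state, and map
 * the firmware state onto the EEH_STATE_* flags.
 */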
static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Use the PE address if we have one */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* The older call doesn't return unavailability info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	if (!rets[1])
		return EEH_STATE_NOT_SUPPORT;

	switch (rets[0]) {
	case 0:
		result = EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 1:
		result = EEH_STATE_RESET_ACTIVE |
			 EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 2:
		result = 0;
		break;
	case 4:
		result = EEH_STATE_MMIO_ENABLED;
		break;
	case 5:
		if (rets[2]) {
			if (delay)
				*delay = rets[2];
			result = EEH_STATE_UNAVAILABLE;
		} else {
			result = EEH_STATE_NOT_SUPPORT;
		}
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

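/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option (e.g. hot or fundamental reset)
 *
 * Reset the PE through the ibm,set-slot-reset RTAS call and observe the
 * required hold or settle delay.
 */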
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	int config_addr;
	int ret;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	/* If fundamental-reset isn't supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}

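/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with the retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log for the PE through the
 * ibm,slot-error-detail RTAS call.
 */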
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	int config_addr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}

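/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * Ask firmware to reconfigure the bridges in the PE so that the
 * malfunctioning PE can be recovered, retrying for a bounded time while
 * firmware reports it is busy.
 */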
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;
	/* Wait up to 0.2s before giving up on the configuration */
	int max_wait = 200;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	while (max_wait > 0) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));

		if (!ret)
			return ret;

		/*
		 * Clamp overly long extended delay codes so a single retry
		 * doesn't consume the whole budget; see
		 * rtas_busy_delay_time() for how the codes map to delays.
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN+2;

		max_wait -= rtas_busy_delay_time(ret);

		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
		__func__, pe->phb->global_number, pe->addr, ret);
	return ret;
}

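/**
 * pseries_eeh_read_config - Read PCI config space
 * @pdn: PCI device node
 * @where: PCI config space offset
 * @size: access size
 * @val: value read
 *
 * Read config space of the specified device through RTAS.
 */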
static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
	return rtas_read_config(pdn, where, size, val);
}

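/**
 * pseries_eeh_write_config - Write PCI config space
 * @pdn: PCI device node
 * @where: PCI config space offset
 * @size: access size
 * @val: value to be written
 *
 * Write config space of the specified device through RTAS.
 */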
static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
	return rtas_write_config(pdn, where, size, val);
}

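/*
 * pseries_eeh_restore_config - Restore device configuration after reset
 * @pdn: PCI device node
 *
 * SR-IOV VFs need their config space restored explicitly after a reset;
 * other devices need no platform specific help here.
 */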
static int pseries_eeh_restore_config(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	s64 ret = 0;

	if (!edev)
		return -EEXIST;

	/* Restore the config space of SR-IOV VFs */
	if (edev->physfn)
		ret = eeh_restore_vf_config(pdn);

	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->pe_config_addr, ret);
		return -EIO;
	}

	return ret;
}

#ifdef CONFIG_PCI_IOV
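/*
 * pseries_send_allow_unfreeze - Ask firmware to allow unfreezing of VF PEs
 * @pdn: PCI device node of the physical function
 * @vf_pe_array: array of VF PE numbers (big endian)
 * @cur_vfs: number of entries in @vf_pe_array
 *
 * Issue the ibm,open-sriov-allow-unfreeze RTAS call with the PE numbers
 * copied into the shared RTAS data buffer.
 */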
int pseries_send_allow_unfreeze(struct pci_dn *pdn,
				u16 *vf_pe_array, int cur_vfs)
{
	int rc;
	int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
	unsigned long buid, addr;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;
	spin_lock(&rtas_data_buf_lock);
	memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
		       addr,
		       BUID_HI(buid),
		       BUID_LO(buid),
		       rtas_data_buf, cur_vfs * sizeof(u16));
	spin_unlock(&rtas_data_buf_lock);
	if (rc)
		pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
			__func__,
			pdn->phb->global_number, addr, rc);
	return rc;
}

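/*
 * pseries_call_allow_unfreeze - Allow unfreezing for a PF or VF
 * @edev: EEH device
 *
 * For a physical function, collect the PE numbers of all of its VFs and
 * allow them to be unfrozen in one call; for a virtual function, allow
 * only its own PE, going through its parent PF.
 */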
static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
	struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
	int cur_vfs = 0, rc = 0, vf_index, bus, devfn;
	u16 *vf_pe_array;

	vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!vf_pe_array)
		return -ENOMEM;
	if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
		if (edev->pdev->is_physfn) {
			/* PF: allow unfreezing of all of its VF PEs */
			cur_vfs = pci_num_vf(edev->pdev);
			pdn = eeh_dev_to_pdn(edev);
			parent = pdn->parent;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++)
				vf_pe_array[vf_index] =
					cpu_to_be16(pdn->pe_num_map[vf_index]);
			rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
							 cur_vfs);
			pdn->last_allow_rc = rc;
			/* Record the result on each VF's pci_dn as well */
			for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
				list_for_each_entry_safe(pdn, tmp,
							 &parent->child_list,
							 list) {
					bus = pci_iov_virtfn_bus(edev->pdev,
								 vf_index);
					devfn = pci_iov_virtfn_devfn(edev->pdev,
								     vf_index);
					if (pdn->busno != bus ||
					    pdn->devfn != devfn)
						continue;
					pdn->last_allow_rc = rc;
				}
			}
		} else {
			/* VF: allow unfreezing of its own PE via the PF */
			pdn = pci_get_pdn(edev->pdev);
			vf_pe_array[0] = cpu_to_be16(pdn->pe_number);
			physfn_pdn = pci_get_pdn(edev->physfn);
			rc = pseries_send_allow_unfreeze(physfn_pdn,
							 vf_pe_array, 1);
			pdn->last_allow_rc = rc;
		}
	}

	kfree(vf_pe_array);
	return rc;
}

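/*
 * pseries_notify_resume - Notify that a device is resuming after recovery
 * @pdn: PCI device node
 *
 * If the firmware implements ibm,open-sriov-allow-unfreeze and the
 * device is an SR-IOV PF or VF, allow its PE(s) to be unfrozen again.
 */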
static int pseries_notify_resume(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (!edev)
		return -EEXIST;

	if (rtas_token("ibm,open-sriov-allow-unfreeze")
		== RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
		return pseries_call_allow_unfreeze(edev);

	return 0;
}
#endif

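/* Platform dependent EEH operations for pseries */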
static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= NULL,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= pseries_eeh_restore_config,
#ifdef CONFIG_PCI_IOV
	.notify_resume		= pseries_notify_resume
#endif
};

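/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * Register the pseries EEH operations with the EEH core at early init
 * time, before any EEH related functions are used.
 */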
static int __init eeh_pseries_init(void)
{
	int ret;

	ret = eeh_ops_register(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);

	return ret;
}
machine_early_initcall(pseries, eeh_pseries_init);