1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/module.h>
20#include <linux/pci.h>
21#include <linux/kernel.h>
22#include <linux/errno.h>
23#include <linux/pm.h>
24#include <linux/suspend.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27#include <linux/kfifo.h>
28#include "aerdrv.h"
29
30static bool forceload;
31static bool nosourceid;
32module_param(forceload, bool, 0);
33module_param(nosourceid, bool, 0);
34
35#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
36 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
37
38int pci_enable_pcie_error_reporting(struct pci_dev *dev)
39{
40 if (pcie_aer_get_firmware_first(dev))
41 return -EIO;
42
43 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
44 return -EIO;
45
46 return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
47}
48EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
49
/**
 * pci_disable_pcie_error_reporting - disable PCIe AER error reporting
 * @dev: the PCIe device on which to disable error reporting
 *
 * Clears the correctable, non-fatal, fatal and unsupported-request
 * reporting enables in the Device Control register.
 *
 * Returns 0 on success; -EIO when platform firmware owns the AER
 * registers (firmware-first mode), in which case the OS must not
 * touch the AER control bits.
 */
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
59
60int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
61{
62 int pos;
63 u32 status;
64
65 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
66 if (!pos)
67 return -EIO;
68
69 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
70 if (status)
71 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
72
73 return 0;
74}
75EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
76
77
78
79
80
81
82static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
83{
84 if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
85 e_info->dev[e_info->error_dev_num] = dev;
86 e_info->error_dev_num++;
87 return 0;
88 }
89 return -ENOSPC;
90}
91
92
93
94
95
96
97static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
98{
99 int pos;
100 u32 status, mask;
101 u16 reg16;
102
103
104
105
106
107 if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
108
109 if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
110 return true;
111
112
113 if (!e_info->multi_error_valid)
114 return false;
115 }
116
117
118
119
120
121
122
123
124
125 if (atomic_read(&dev->enable_cnt) == 0)
126 return false;
127
128
129 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, ®16);
130 if (!(reg16 & PCI_EXP_AER_FLAGS))
131 return false;
132
133 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
134 if (!pos)
135 return false;
136
137
138 if (e_info->severity == AER_CORRECTABLE) {
139 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
140 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
141 } else {
142 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
143 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
144 }
145 if (status & ~mask)
146 return true;
147
148 return false;
149}
150
151static int find_device_iter(struct pci_dev *dev, void *data)
152{
153 struct aer_err_info *e_info = (struct aer_err_info *)data;
154
155 if (is_error_source(dev, e_info)) {
156
157 if (add_error_device(e_info, dev)) {
158
159
160 return 1;
161 }
162
163
164 if (!e_info->multi_error_valid)
165 return 1;
166 }
167 return 0;
168}
169
170
171
172
173
174
175
176
177
178
179
180
181
/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: including detailed error information such as id
 *
 * Invoked by DPC when an error is detected at the Root Port. The caller
 * must set id, severity, and multi_error_valid of the aer_err_info pointed
 * to by @e_info; this function fills in e_info->error_dev_num and
 * e_info->dev[].
 *
 * Return: true if at least one source device was found.
 */
static bool find_source_device(struct pci_dev *parent,
		struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is Root Port itself an agent that sent the error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		dev_printk(KERN_DEBUG, &parent->dev,
				"can't find device of ID%04x\n",
				e_info->id);
		return false;
	}
	return true;
}
206
/*
 * pci_walk_bus() callback: set the device's error_state and collect the
 * driver's error_detected() vote into the shared aer_broadcast_data.
 */
static int report_error_detected(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	dev->error_state = result_data->state;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->error_detected) {
		if (result_data->state == pci_channel_io_frozen &&
			!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
			/*
			 * In case of fatal recovery, if one of the
			 * downstream devices has no driver we might be
			 * unable to recover because a later insmod of a
			 * driver for this device is unaware of its hw
			 * state.
			 */
			dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
				   dev->driver ?
				   "no AER-aware driver" : "no driver");
		}

		/*
		 * If there's any device in the subtree that does not
		 * have an error_detected callback, PCI_ERS_RESULT_NO_AER_DRIVER
		 * prevents calling of the subsequent mmio_enabled /
		 * slot_reset / resume callbacks of "any" device in the
		 * subtree. All the devices in the subtree are left in
		 * the error state without recovery.
		 */
		if (!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
		else
			vote = PCI_ERS_RESULT_NONE;
	} else {
		err_handler = dev->driver->err_handler;
		vote = err_handler->error_detected(dev, result_data->state);
	}

	result_data->result = merge_result(result_data->result, vote);
	device_unlock(&dev->dev);
	return 0;
}
257
258static int report_mmio_enabled(struct pci_dev *dev, void *data)
259{
260 pci_ers_result_t vote;
261 const struct pci_error_handlers *err_handler;
262 struct aer_broadcast_data *result_data;
263 result_data = (struct aer_broadcast_data *) data;
264
265 device_lock(&dev->dev);
266 if (!dev->driver ||
267 !dev->driver->err_handler ||
268 !dev->driver->err_handler->mmio_enabled)
269 goto out;
270
271 err_handler = dev->driver->err_handler;
272 vote = err_handler->mmio_enabled(dev);
273 result_data->result = merge_result(result_data->result, vote);
274out:
275 device_unlock(&dev->dev);
276 return 0;
277}
278
279static int report_slot_reset(struct pci_dev *dev, void *data)
280{
281 pci_ers_result_t vote;
282 const struct pci_error_handlers *err_handler;
283 struct aer_broadcast_data *result_data;
284 result_data = (struct aer_broadcast_data *) data;
285
286 device_lock(&dev->dev);
287 if (!dev->driver ||
288 !dev->driver->err_handler ||
289 !dev->driver->err_handler->slot_reset)
290 goto out;
291
292 err_handler = dev->driver->err_handler;
293 vote = err_handler->slot_reset(dev);
294 result_data->result = merge_result(result_data->result, vote);
295out:
296 device_unlock(&dev->dev);
297 return 0;
298}
299
300static int report_resume(struct pci_dev *dev, void *data)
301{
302 const struct pci_error_handlers *err_handler;
303
304 device_lock(&dev->dev);
305 dev->error_state = pci_channel_io_normal;
306
307 if (!dev->driver ||
308 !dev->driver->err_handler ||
309 !dev->driver->err_handler->resume)
310 goto out;
311
312 err_handler = dev->driver->err_handler;
313 err_handler->resume(dev);
314out:
315 device_unlock(&dev->dev);
316 return 0;
317}
318
319
320
321
322
323
324
325
326
327
328
329
/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to where in the hierarchy the message is broadcast down from
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback to be broadcast
 *
 * Invoked during the error recovery process. The error severity is
 * broadcast to all downstream drivers in the hierarchy in question, and
 * their merged vote is returned.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
	enum pci_channel_state state,
	char *error_mesg,
	int (*cb)(struct pci_dev *, void *))
{
	struct aer_broadcast_data result_data;

	dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
	result_data.state = state;
	/* error_detected() starts optimistic; the other phases start "recovered" */
	if (cb == report_error_detected)
		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
	else
		result_data.result = PCI_ERS_RESULT_RECOVERED;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/*
		 * If the error is reported by a bridge, we think this error
		 * is related to the downstream link of the bridge, so we
		 * do error recovery on all subordinates of the bridge
		 * instead of the bridge itself, and clear the bridge's
		 * error status anyway.
		 */
		if (cb == report_error_detected)
			dev->error_state = state;
		pci_walk_bus(dev->subordinate, cb, &result_data);
		if (cb == report_resume) {
			pci_cleanup_aer_uncorrect_error_status(dev);
			dev->error_state = pci_channel_io_normal;
		}
	} else {
		/*
		 * If the error is reported by an end point, we think this
		 * error is related to the upstream link of the end point,
		 * so broadcast on the whole bus the device sits on.
		 */
		pci_walk_bus(dev->bus, cb, &result_data);
	}

	return result_data.result;
}
368
369
370
371
372
373
374
375
/**
 * default_reset_link - default reset function
 * @dev: pointer to the bridge (Root Port or Downstream Port) to reset
 *
 * Invoked when performing a link reset and no AER service driver
 * provides its own reset_link implementation. Performs a secondary
 * bus reset and always reports success.
 */
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
	pci_reset_bridge_secondary_bus(dev);
	dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
	return PCI_ERS_RESULT_RECOVERED;
}
382
383static int find_aer_service_iter(struct device *device, void *data)
384{
385 struct pcie_port_service_driver *service_driver, **drv;
386
387 drv = (struct pcie_port_service_driver **) data;
388
389 if (device->bus == &pcie_port_bus_type && device->driver) {
390 service_driver = to_service_driver(device->driver);
391 if (service_driver->service == PCIE_PORT_SERVICE_AER) {
392 *drv = service_driver;
393 return 1;
394 }
395 }
396
397 return 0;
398}
399
/*
 * Locate the AER port-service driver bound to one of @dev's child
 * service devices, or NULL if none is bound.
 */
static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
	struct pcie_port_service_driver *drv = NULL;

	device_for_each_child(&dev->dev, &drv, find_aer_service_iter);

	return drv;
}
408
/**
 * reset_link - reset corresponding root port or downstream port
 * @dev: pointer to a pci_dev data structure of the agent detecting an error
 *
 * Invoked when performing a link reset at a Root Port or Downstream Port.
 * Returns PCI_ERS_RESULT_RECOVERED on success, otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t reset_link(struct pci_dev *dev)
{
	struct pci_dev *udev;
	pci_ers_result_t status;
	struct pcie_port_service_driver *driver;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/* Reset this port for all subordinates */
		udev = dev;
	} else {
		/* Reset the upstream component (likely downstream port) */
		udev = dev->bus->self;
	}

	/* Use the AER driver of the component first, if there is one */
	driver = find_aer_service(udev);

	if (driver && driver->reset_link) {
		status = driver->reset_link(udev);
	} else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM ||
		pci_pcie_type(udev) == PCI_EXP_TYPE_ROOT_PORT) {
		status = default_reset_link(udev);
	} else {
		dev_printk(KERN_DEBUG, &dev->dev,
			"no link-reset support at upstream device %s\n",
			pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (status != PCI_ERS_RESULT_RECOVERED) {
		dev_printk(KERN_DEBUG, &dev->dev,
			"link reset at upstream device %s failed\n",
			pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}
447
448
449
450
451
452
453
454
455
456
/**
 * do_recovery - handle nonfatal/fatal error recovery process
 * @dev: pointer to a pci_dev data structure of the agent detecting an error
 * @severity: error severity type (AER_FATAL or nonfatal/correctable)
 *
 * Drives the standard PCI error-recovery sequence over the affected
 * hierarchy: error_detected -> [link reset if fatal] -> mmio_enabled ->
 * slot_reset -> resume, aborting as soon as any stage fails to report
 * PCI_ERS_RESULT_RECOVERED.
 */
static void do_recovery(struct pci_dev *dev, int severity)
{
	pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
	enum pci_channel_state state;

	if (severity == AER_FATAL)
		state = pci_channel_io_frozen;
	else
		state = pci_channel_io_normal;

	status = broadcast_error_message(dev,
			state,
			"error_detected",
			report_error_detected);

	/* Fatal errors require a link reset before further recovery */
	if (severity == AER_FATAL) {
		result = reset_link(dev);
		if (result != PCI_ERS_RESULT_RECOVERED)
			goto failed;
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER)
		status = broadcast_error_message(dev,
				state,
				"mmio_enabled",
				report_mmio_enabled);

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should we call platform-specific functions to
		 * reset the slot before calling drivers' slot_reset
		 * callbacks?
		 */
		status = broadcast_error_message(dev,
				state,
				"slot_reset",
				report_slot_reset);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	broadcast_error_message(dev,
				state,
				"resume",
				report_resume);

	dev_info(&dev->dev, "AER: Device recovery successful\n");
	return;

failed:
	/* TODO: Should the kernel panic here? */
	dev_info(&dev->dev, "AER: Device recovery failed\n");
}
511
512
513
514
515
516
517
518
519
520static void handle_error_source(struct pcie_device *aerdev,
521 struct pci_dev *dev,
522 struct aer_err_info *info)
523{
524 int pos;
525
526 if (info->severity == AER_CORRECTABLE) {
527
528
529
530
531 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
532 if (pos)
533 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
534 info->status);
535 } else
536 do_recovery(dev, info->severity);
537}
538
539#ifdef CONFIG_ACPI_APEI_PCIEAER
540static void aer_recover_work_func(struct work_struct *work);
541
542#define AER_RECOVER_RING_ORDER 4
543#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
544
/*
 * One queued AER event, captured by aer_recover_queue() for deferred
 * handling in process context by aer_recover_work_func().
 */
struct aer_recover_entry
{
	u8 bus;		/* bus number of the error source */
	u8 devfn;	/* device/function of the error source */
	u16 domain;	/* PCI domain (segment) number */
	int severity;	/* AER severity passed on to do_recovery() */
	struct aer_capability_regs *regs;	/* snapshot used for cper_print_aer() */
};
553
554static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
555 AER_RECOVER_RING_SIZE);
556
557
558
559
560
561static DEFINE_SPINLOCK(aer_recover_ring_lock);
562static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
563
564void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
565 int severity, struct aer_capability_regs *aer_regs)
566{
567 unsigned long flags;
568 struct aer_recover_entry entry = {
569 .bus = bus,
570 .devfn = devfn,
571 .domain = domain,
572 .severity = severity,
573 .regs = aer_regs,
574 };
575
576 spin_lock_irqsave(&aer_recover_ring_lock, flags);
577 if (kfifo_put(&aer_recover_ring, entry))
578 schedule_work(&aer_recover_work);
579 else
580 pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
581 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
582 spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
583}
584EXPORT_SYMBOL_GPL(aer_recover_queue);
585
/*
 * Work handler: drain every event queued by aer_recover_queue(), look up
 * the affected device, log the error record, and run recovery on it.
 */
static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
			entry.devfn);
		if (!pdev) {
			/* Device may have been removed since the event was queued */
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		do_recovery(pdev, entry.severity);
		pci_dev_put(pdev);	/* drop the ref taken by the lookup */
	}
}
605#endif
606
607
608
609
610
611
612
613
614
615
/**
 * get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to structure to store the error record
 *
 * Return 1 on success (there is something worth reporting), 0 otherwise.
 *
 * Note that @info is reused among all error devices: clear fields properly.
 */
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos, temp;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	/* The device might not support AER */
	if (!pos)
		return 1;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
			&info->mask);
		/* Nothing to report if every latched bit is masked */
		if (!(info->status & ~info->mask))
			return 0;
	} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
		info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
		info->first_error = PCI_ERR_CAP_FEP(temp);

		if (info->status & AER_LOG_TLP_MASKS) {
			/* Capture the four-dword TLP header log */
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}
667
668static inline void aer_process_err_devices(struct pcie_device *p_device,
669 struct aer_err_info *e_info)
670{
671 int i;
672
673
674 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
675 if (get_device_error_info(e_info->dev[i], e_info))
676 aer_print_error(e_info->dev[i], e_info);
677 }
678 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
679 if (get_device_error_info(e_info->dev[i], e_info))
680 handle_error_source(p_device, e_info->dev[i], e_info);
681 }
682}
683
684
685
686
687
688
/**
 * aer_isr_one_error - consume an error detected by root port
 * @p_device: pointer to error root port service device
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct pcie_device *p_device,
		struct aer_err_source *e_src)
{
	struct aer_err_info *e_info;

	/* struct aer_err_info might be big, so we allocate it with slab */
	e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
	if (!e_info) {
		dev_printk(KERN_DEBUG, &p_device->port->dev,
			"Can't allocate mem when processing AER errors\n");
		return;
	}

	/*
	 * There is a possibility that both a correctable error and an
	 * uncorrectable error were logged. Report the correctable
	 * error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info->id = ERR_COR_ID(e_src->id);
		e_info->severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info->id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info->severity = AER_FATAL;
		else
			e_info->severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	kfree(e_info);
}
742
743
744
745
746
747
748
749
750
751
752static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
753{
754 unsigned long flags;
755
756
757 spin_lock_irqsave(&rpc->e_lock, flags);
758 if (rpc->prod_idx == rpc->cons_idx) {
759 spin_unlock_irqrestore(&rpc->e_lock, flags);
760 return 0;
761 }
762
763 *e_src = rpc->e_sources[rpc->cons_idx];
764 rpc->cons_idx++;
765 if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
766 rpc->cons_idx = 0;
767 spin_unlock_irqrestore(&rpc->e_lock, flags);
768
769 return 1;
770}
771
772
773
774
775
776
777
/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as DPC, when the root port records a newly detected error.
 */
void aer_isr(struct work_struct *work)
{
	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
	struct pcie_device *p_device = rpc->rpd;
	struct aer_err_source uninitialized_var(e_src);

	mutex_lock(&rpc->rpc_mutex);
	while (get_e_source(rpc, &e_src))
		aer_isr_one_error(p_device, &e_src);
	mutex_unlock(&rpc->rpc_mutex);

	/* presumably wakes a waiter on rpc->wait_release during teardown —
	 * confirm against the service remove path in aerdrv.c */
	wake_up(&rpc->wait_release);
}
791
792
793
794
795
796
797
798int aer_init(struct pcie_device *dev)
799{
800 if (forceload) {
801 dev_printk(KERN_DEBUG, &dev->device,
802 "aerdrv forceload requested.\n");
803 pcie_aer_force_firmware_first(dev->port, 0);
804 }
805 return 0;
806}
807