1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/export.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/acpi.h>
22#include <linux/dmi.h>
23#include <linux/ioport.h>
24#include <linux/sched.h>
25#include <linux/ktime.h>
26#include <linux/mm.h>
27#include <linux/nvme.h>
28#include <linux/platform_data/x86/apple.h>
29#include <linux/pm_runtime.h>
30#include <linux/suspend.h>
31#include <linux/switchtec.h>
32#include <asm/dma.h>
33#include "pci.h"
34
/*
 * Optionally announce a quirk hook invocation (when booted with
 * initcall_debug) and return the start timestamp so that
 * fixup_debug_report() can compute how long the hook took.
 */
static ktime_t fixup_debug_start(struct pci_dev *dev,
				 void (*fn)(struct pci_dev *dev))
{
	if (initcall_debug)
		pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));

	/* Timestamp taken after the (optional) print so the print cost
	 * is not charged to the hook. */
	return ktime_get();
}
43
/*
 * Report how long a quirk hook took.  Always printed when
 * initcall_debug is set; otherwise only for hooks slower than ~10 ms.
 */
static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
			       void (*fn)(struct pci_dev *dev))
{
	ktime_t delta, rettime;
	unsigned long long duration;

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	/* ">> 10" is a cheap approximation of ns -> us (divide by 1024). */
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	if (initcall_debug || duration > 10000)
		pci_info(dev, "%pS took %lld usecs\n", fn, duration);
}
56
/*
 * Walk the fixup entries in [f, end) and call every hook whose
 * class/vendor/device triple matches @dev.  Each invocation is
 * optionally timed via fixup_debug_start()/fixup_debug_report().
 */
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
			  struct pci_fixup *end)
{
	ktime_t calltime;

	for (; f < end; f++)
		/* dev->class is shifted by the entry's class_shift before
		 * comparing, so an entry may match the full 24-bit class
		 * or just the base class/subclass. */
		if ((f->class == (u32) (dev->class >> f->class_shift) ||
		     f->class == (u32) PCI_ANY_ID) &&
		    (f->vendor == dev->vendor ||
		     f->vendor == (u16) PCI_ANY_ID) &&
		    (f->device == dev->device ||
		     f->device == (u16) PCI_ANY_ID)) {
			void (*hook)(struct pci_dev *dev);
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
			/* Table stores a 32-bit place-relative offset
			 * instead of an absolute pointer. */
			hook = offset_to_ptr(&f->hook_offset);
#else
			hook = f->hook;
#endif
			calltime = fixup_debug_start(dev, hook);
			hook(dev);
			fixup_debug_report(dev, calltime, hook);
		}
}
80
/*
 * Start/end markers of the per-pass fixup tables.  The
 * DECLARE_PCI_FIXUP_* macros place entries into these linker sections;
 * the symbols themselves are defined by the linker script.
 */
extern struct pci_fixup __start_pci_fixups_early[];
extern struct pci_fixup __end_pci_fixups_early[];
extern struct pci_fixup __start_pci_fixups_header[];
extern struct pci_fixup __end_pci_fixups_header[];
extern struct pci_fixup __start_pci_fixups_final[];
extern struct pci_fixup __end_pci_fixups_final[];
extern struct pci_fixup __start_pci_fixups_enable[];
extern struct pci_fixup __end_pci_fixups_enable[];
extern struct pci_fixup __start_pci_fixups_resume[];
extern struct pci_fixup __end_pci_fixups_resume[];
extern struct pci_fixup __start_pci_fixups_resume_early[];
extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
extern struct pci_fixup __start_pci_fixups_suspend_late[];
extern struct pci_fixup __end_pci_fixups_suspend_late[];
97
/* Final-pass quirks are held off until pci_apply_final_quirks() runs. */
static bool pci_apply_fixup_final_quirks;

/*
 * pci_fixup_device - apply the quirks registered for one fixup pass
 * @pass: which table to run (early/header/final/enable/resume/suspend...)
 * @dev:  device the quirks are matched against
 *
 * Selects the linker-section table for @pass and runs every matching
 * hook via pci_do_fixups().  pci_fixup_final is a no-op until the
 * fs_initcall below flips pci_apply_fixup_final_quirks.
 */
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
	struct pci_fixup *start, *end;

	switch (pass) {
	case pci_fixup_early:
		start = __start_pci_fixups_early;
		end = __end_pci_fixups_early;
		break;

	case pci_fixup_header:
		start = __start_pci_fixups_header;
		end = __end_pci_fixups_header;
		break;

	case pci_fixup_final:
		if (!pci_apply_fixup_final_quirks)
			return;
		start = __start_pci_fixups_final;
		end = __end_pci_fixups_final;
		break;

	case pci_fixup_enable:
		start = __start_pci_fixups_enable;
		end = __end_pci_fixups_enable;
		break;

	case pci_fixup_resume:
		start = __start_pci_fixups_resume;
		end = __end_pci_fixups_resume;
		break;

	case pci_fixup_resume_early:
		start = __start_pci_fixups_resume_early;
		end = __end_pci_fixups_resume_early;
		break;

	case pci_fixup_suspend:
		start = __start_pci_fixups_suspend;
		end = __end_pci_fixups_suspend;
		break;

	case pci_fixup_suspend_late:
		start = __start_pci_fixups_suspend_late;
		end = __end_pci_fixups_suspend_late;
		break;

	default:
		/* stupid compiler warning, you would think with an enum... */
		return;
	}
	pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
154
/*
 * Run the "final" quirk pass over every PCI device found so far and,
 * as a side effect of the same walk, settle on a system-wide cache
 * line size (CLS): if the devices disagree, fall back to the
 * architecture default.
 */
static int __init pci_apply_final_quirks(void)
{
	struct pci_dev *dev = NULL;
	u8 cls = 0;
	u8 tmp;

	if (pci_cache_line_size)
		pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);

	pci_apply_fixup_final_quirks = true;
	for_each_pci_dev(dev) {
		pci_fixup_device(pci_fixup_final, dev);
		/*
		 * If arch hasn't set it explicitly yet, use the CLS
		 * value shared by all devices.  If they don't agree,
		 * fall back to the default.
		 */
		if (!pci_cache_line_size) {
			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
			if (!cls)
				cls = tmp;
			if (!tmp || cls == tmp)
				continue;

			pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
			         cls << 2, tmp << 2,
			         pci_dfl_cache_line_size << 2);
			pci_cache_line_size = pci_dfl_cache_line_size;
		}
	}

	if (!pci_cache_line_size) {
		/* All devices agreed (or none reported a CLS). */
		pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
			pci_dfl_cache_line_size << 2);
		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
	}

	return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
195
196
197
198
199
200
201
/*
 * Host bridges must keep MMIO decoding enabled even while their BARs
 * are being sized; mmio_always_on tells the core not to clear
 * PCI_COMMAND_MEMORY for this device.
 */
static void quirk_mmio_always_on(struct pci_dev *dev)
{
	dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
208
209
210
211
212
/*
 * Disable parity-error reporting on Mellanox Tavor devices;
 * pci_disable_parity() is the generic helper from the PCI core
 * (presumably covering a device erratum -- see upstream changelog).
 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, pci_disable_parity);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, pci_disable_parity);
215
216
217
218
219
/*
 * Triggered on Intel 82441 (Natoma): find every PIIX3 ISA bridge in
 * the system and make sure its Passive Release feature is enabled
 * (bit 1 of config register 0x82), as the printed message states.
 */
static void quirk_passive_release(struct pci_dev *dev)
{
	struct pci_dev *d = NULL;
	unsigned char dlc;

	/*
	 * Walk all 82371SB (PIIX3) function-0 devices; pci_get_device()
	 * drops the previous reference each iteration.
	 */
	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
		pci_read_config_byte(d, 0x82, &dlc);
		if (!(dlc & 1<<1)) {
			pci_info(d, "PIIX3: Enabling Passive Release\n");
			dlc |= 1<<1;
			pci_write_config_byte(d, 0x82, dlc);
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
240
241
242
243
244
245
246
247
248
249static void quirk_isa_dma_hangs(struct pci_dev *dev)
250{
251 if (!isa_dma_bridge_buggy) {
252 isa_dma_bridge_buggy = 1;
253 pci_info(dev, "Activating ISA DMA hang workarounds\n");
254 }
255}
256
257
258
259
260DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
261DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
262DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
263DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
264DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
265DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
266DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
267
268
269
270
271
/*
 * Intel Tiger Point LPC: read the ACPI PM base from config offset
 * 0x40, then check PM1a status.  If bit 0x10 (BM_STS) is set, clear
 * it by writing it back (status bits are write-1-to-clear), flagging
 * the firmware bug in the log.
 */
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
	u32 pmbase;
	u16 pm1a;

	pci_read_config_dword(dev, 0x40, &pmbase);
	pmbase = pmbase & 0xff80;	/* mask off enable/low bits */
	pm1a = inw(pmbase);

	if (pm1a & 0x10) {
		pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
		outw(0x10, pmbase);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
287
288
289static void quirk_nopcipci(struct pci_dev *dev)
290{
291 if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
292 pci_info(dev, "Disabling direct PCI/PCI transfers\n");
293 pci_pci_problems |= PCIPCI_FAIL;
294 }
295}
296DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
297DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
298
/*
 * AMD 8151: revision 0x13 has a chipset erratum; flag that direct
 * PCI/AGP transfers must be avoided.
 */
static void quirk_nopciamd(struct pci_dev *dev)
{
	u8 rev;
	pci_read_config_byte(dev, 0x08, &rev);	/* PCI revision ID */
	if (rev == 0x13) {
		/* Erratum only on this stepping. */
		pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
		pci_pci_problems |= PCIAGP_FAIL;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
310
311
312static void quirk_triton(struct pci_dev *dev)
313{
314 if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
315 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
316 pci_pci_problems |= PCIPCI_TRITON;
317 }
318}
319DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
320DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
321DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
322DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
323
324
325
326
327
328
329
330
331
332
333
/*
 * VIA northbridge (8363/8371/8361) latency workaround.  Only applies
 * when a matching buggy southbridge stepping is present: 82C686
 * revisions 0x40-0x42, or 8231 revisions 0x10-0x12.  For those, the
 * bus arbitration register (config 0x76) is rewritten as below.
 * Also re-applied on resume.
 */
static void quirk_vialatency(struct pci_dev *dev)
{
	struct pci_dev *p;
	u8 busarb;

	/* Look for a 686 southbridge first... */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
	if (p != NULL) {
		/* Only revisions 0x40..0x42 need the workaround. */
		if (p->revision < 0x40 || p->revision > 0x42)
			goto exit;
	} else {
		/* ...otherwise check for an affected 8231. */
		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
		if (p == NULL)	/* No problem parts */
			goto exit;

		/* Only revisions 0x10..0x12 are affected. */
		if (p->revision < 0x10 || p->revision > 0x12)
			goto exit;
	}

	/*
	 * Rewrite the bus arbitration control: clear bit 5, set bit 4
	 * (exact field semantics per the VIA datasheet -- the intent is
	 * the documented southbridge latency workaround).
	 */
	pci_read_config_byte(dev, 0x76, &busarb);

	busarb &= ~(1<<5);
	busarb |= (1<<4);
	pci_write_config_byte(dev, 0x76, busarb);
	pci_info(dev, "Applying VIA southbridge workaround\n");
exit:
	/* Drop the reference taken by pci_get_device(); free-on-NULL safe. */
	pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
395
396
397static void quirk_viaetbf(struct pci_dev *dev)
398{
399 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
400 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
401 pci_pci_problems |= PCIPCI_VIAETBF;
402 }
403}
404DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
405
406static void quirk_vsfx(struct pci_dev *dev)
407{
408 if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
409 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
410 pci_pci_problems |= PCIPCI_VSFX;
411 }
412}
413DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
414
415
416
417
418
419
420static void quirk_alimagik(struct pci_dev *dev)
421{
422 if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
423 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
424 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
425 }
426}
427DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
428DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
429
430
431static void quirk_natoma(struct pci_dev *dev)
432{
433 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
434 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
435 pci_pci_problems |= PCIPCI_NATOMA;
436 }
437}
438DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
439DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
440DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
441DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
442DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
443DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
444
445
446
447
448
/*
 * IBM Citrine: restrict config space to 0xA0 bytes -- accesses beyond
 * that offset misbehave on this device, so cap cfg_size.
 */
static void quirk_citrine(struct pci_dev *dev)
{
	dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
454
455
456
457
458
/*
 * Netronome NFP4000/5000/6000: limit config space to 0x600 bytes to
 * keep the core from touching registers past that offset.
 */
static void quirk_nfp6000(struct pci_dev *dev)
{
	dev->cfg_size = 0x600;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
467
468
/*
 * IBM 0x034a: grow every sub-page memory BAR to a full page and mark
 * it IORESOURCE_UNSET so the core reassigns it page-aligned.
 */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		/* Only memory BARs smaller than one page are touched. */
		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
			r->end = PAGE_SIZE - 1;
			r->start = 0;
			r->flags |= IORESOURCE_UNSET;
			pci_info(dev, "expanded BAR %d to page size: %pR\n",
				 i, r);
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
486
487
488
489
490
/*
 * S3 868/968: BAR 0 must be 64MB-aligned and 64MB in size.  If it is
 * not, zero it and mark it unset so the core re-allocates a proper
 * 64MB-aligned window.
 */
static void quirk_s3_64M(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];

	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
		r->flags |= IORESOURCE_UNSET;
		r->start = 0;
		r->end = 0x3ffffff;	/* 64MB - 1 */
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
503
/*
 * Re-derive an I/O resource directly from BAR @pos for devices whose
 * header mis-reports it.  The resource is marked PCI_FIXED so the
 * core never moves it.  @size is the true decode size.
 */
static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
		     const char *name)
{
	u32 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + pos;

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);

	if (!region)
		return;

	res->name = pci_name(dev);
	/* Keep the BAR's low flag bits, then force I/O + fixed. */
	res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
	res->flags |=
		(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
	region &= ~(size - 1);		/* align base to the true size */

	/* Convert bus address to resource (CPU) address. */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
		 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
}
530
531
532
533
534
535
536
537
538
539
/*
 * AMD CS5536 ISA bridge: the config header describes its BARs
 * incorrectly.  If BAR 0 does not show the expected 8-byte length,
 * rebuild BARs 0-2 with their true sizes via quirk_io().
 */
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
	static char *name = "CS5536 ISA bridge";

	if (pci_resource_len(dev, 0) != 8) {
		quirk_io(dev, 0,   8, name);	/* SMB */
		quirk_io(dev, 1, 256, name);	/* GPIO */
		quirk_io(dev, 2,  64, name);	/* MFGPT */
		pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
			 name);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
553
/*
 * Claim an I/O region that a device decodes via a non-BAR config
 * register (@port holds the base), so nothing else gets placed on top
 * of it.  The region is stored in dev->resource[@nr].
 */
static void quirk_io_region(struct pci_dev *dev, int port,
			    unsigned size, int nr, const char *name)
{
	u16 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + nr;

	pci_read_config_word(dev, port, &region);
	region &= ~(size - 1);		/* align base down to region size */

	if (!region)
		return;

	res->name = pci_name(dev);
	res->flags = IORESOURCE_IO;

	/* Convert bus address to resource (CPU) address. */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	if (!pci_claim_resource(dev, nr))
		pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
}
578
579
580
581
582
/*
 * ATI RS100 northbridge: reserve the legacy VGA I/O ports it decodes
 * (0x3b0-0x3bb and 0x3d3) so no other device is assigned them.
 */
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
	pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	/* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */
	request_region(0x3b0, 0x0C, "RadeonIGP");
	request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
591
592
593
594
595
596
597
598
599
600
601
602
603static void quirk_amd_nl_class(struct pci_dev *pdev)
604{
605 u32 class = pdev->class;
606
607
608 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
609 pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
610 class, pdev->class);
611}
612DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
613 quirk_amd_nl_class);
614
615
616
617
618
619
620
621
622static void quirk_synopsys_haps(struct pci_dev *pdev)
623{
624 u32 class = pdev->class;
625
626 switch (pdev->device) {
627 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
628 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
629 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
630 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
631 pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
632 class, pdev->class);
633 break;
634 }
635}
636DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
637 PCI_CLASS_SERIAL_USB_XHCI, 0,
638 quirk_synopsys_haps);
639
640
641
642
643
644
645
646
647
648
649
/*
 * ALi M7101: reserve its ACPI (base at config 0xE0, 64 bytes) and
 * SMBus (base at 0xE2, 32 bytes) I/O regions in the bridge resources.
 */
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
656
/*
 * Report a PIIX4 "device resource" I/O window.  The config dword at
 * @port packs enable bits, a 16-bit base and a size mask; the window
 * is only reported when the bits in @enable are all set.
 */
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;
	mask = (devres >> 16) & 15;	/* size mask, bits 16-19 */
	base = devres & 0xffff;
	size = 16;
	/* Shrink size until its top bit appears in the mask. */
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it, but let's get enough confirmation reports first.
	 */
	base &= -size;			/* align base down to the size */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
}
682
/*
 * Report a PIIX4 "device resource" MMIO window; same idea as
 * piix4_io_quirk() but the base lives in the upper 16 bits and the
 * size mask in the low 6 bits (shifted up to 64KB granularity).
 */
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;
	base = devres & 0xffff0000;
	mask = (devres & 0x3f) << 16;
	size = 128 << 16;
	/* Shrink size until its top bit appears in the mask. */
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it.
	 */
	base &= -size;			/* align base down to the size */
	pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
}
708
709
710
711
712
713
714
/*
 * PIIX4 ACPI/power-management function: claim its ACPI (config 0x40,
 * 64 bytes) and SMBus (config 0x90, 16 bytes) I/O regions, then
 * report any enabled "device resource" windows B through J.
 */
static void quirk_piix4_acpi(struct pci_dev *dev)
{
	u32 res_a;

	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");

	/* Device resource A has enables for some of the other ones. */
	pci_read_config_dword(dev, 0x5c, &res_a);

	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);

	/* Device resource D is just bitfields for static resources. */

	/* Device 12 enabled? */
	if (res_a & (1 << 29)) {
		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
	}
	/* Device 13 enabled? */
	if (res_a & (1 << 30)) {
		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
	}
	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
745
/* Config-space offsets/bits used by the ICH LPC ACPI/GPIO quirks below. */
#define ICH_PMBASE 0x40		/* ACPI/PM I/O base register */
#define ICH_ACPI_CNTL 0x44	/* ACPI control register */
#define ICH4_ACPI_EN 0x10	/* ACPI decode enable (ICH4 and earlier) */
#define ICH6_ACPI_EN 0x80	/* ACPI decode enable (ICH6+) */
#define ICH4_GPIOBASE 0x58	/* GPIO I/O base register (ICH4) */
#define ICH4_GPIO_CNTL 0x5c	/* GPIO control register (ICH4) */
#define ICH4_GPIO_EN 0x10	/* GPIO decode enable (ICH4) */
#define ICH6_GPIOBASE 0x48	/* GPIO I/O base register (ICH6+) */
#define ICH6_GPIO_CNTL 0x4c	/* GPIO control register (ICH6+) */
#define ICH6_GPIO_EN 0x10	/* GPIO decode enable (ICH6+) */
756
757
758
759
760
761
/*
 * ICH4 (and earlier) LPC bridge: if the ACPI and/or GPIO I/O ranges
 * are enabled in the control registers, claim them so nothing else is
 * allocated on top.
 */
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
	u8 enable;

	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH4_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				 "ICH4 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
	if (enable & ICH4_GPIO_EN)
		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
793
/*
 * ICH6+ variant of the ACPI/GPIO region claim: same structure as the
 * ICH4 quirk but with the ICH6 register/bit layout.
 */
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
	u8 enable;

	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH6_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				 "ICH6 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
	if (enable & ICH6_GPIO_EN)
		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH6 GPIO");
}
808
/*
 * Report one ICH6 LPC "generic I/O decode" range.  @reg holds the
 * decode dword: bit 0 is the enable, bits 2-15 the base.  @dynsize
 * selects the smaller decode granularity used by the first range.
 * The range is only printed, not reserved (see comment below).
 */
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name, int dynsize)
{
	u32 val;
	u32 size, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;
	base = val & 0xfffc;
	if (dynsize) {
		/*
		 * This is not correct. It is 16, 32 or 64 bytes
		 * depending on enable bits, but this isn't grokked
		 * from the datasheet reliably -- assume the worst.
		 */
		size = 16;
	} else {
		size = 128;
	}
	base &= ~(size-1);

	/* Just print it out for now. We should reserve it eventually. */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
840
/* ICH6 LPC: claim ACPI/GPIO regions and report the generic decodes. */
static void quirk_ich6_lpc(struct pci_dev *dev)
{
	/* Shared ACPI/GPIO decode with all ICH6+ parts. */
	ich6_lpc_acpi_gpio(dev);

	/* ICH6-specific generic IO decode */
	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
852
/*
 * Report one ICH7+ LPC generic I/O decode range.  Layout: bit 0 is
 * the enable, bits 2-15 the base, bits 18-23 an address mask (the two
 * low mask bits are implicitly set).  Print-only for now.
 */
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name)
{
	u32 val;
	u32 mask, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;

	/* IO base in bits 15:2, mask in bits 23:18, both are dword-based. */
	base = val & 0xfffc;
	mask = (val >> 16) & 0xfc;
	mask |= 3;

	/* Just print it out for now. We should reserve it eventually. */
	pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
876
877
/* ICH7-ICH10 LPC: claim ACPI/GPIO regions and report all four decodes. */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
	/* We share the common ACPI/GPIO decode with ICH6 */
	ich6_lpc_acpi_gpio(dev);

	/* And have 4 ICH7+ generic decodes */
	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
902
903
904
905
906
/*
 * VIA 82C586: on revisions with bit 4 set, claim the 256-byte ACPI
 * I/O region whose base lives at config offset 0x48.
 */
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
	if (dev->revision & 0x10)
		quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
				"vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
914
915
916
917
918
919
920
/*
 * VIA 82C686: same ACPI region as the 82C586 plus the hardware
 * monitor range (base at 0x70, 128 bytes) and SMBus (base at 0x90,
 * 16 bytes).
 */
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
	quirk_vt82c586_acpi(dev);

	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
				"vt82c686 HW-mon");

	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
931
932
933
934
935
936
/*
 * VIA VT8235: claim the power-management (base at 0x88, 128 bytes)
 * and SMBus (base at 0xd0, 16 bytes) I/O regions.
 */
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
943
944
945
946
947
/*
 * TI XIO2000a PCIe-PCI bridge: fast back-to-back transfers on the
 * secondary bus are broken, so clear PCI_COMMAND_FAST_BACK on every
 * device behind the bridge.
 */
static void quirk_xio2000a(struct pci_dev *dev)
{
	struct pci_dev *pdev;
	u16 command;

	pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
		pci_read_config_word(pdev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_FAST_BACK)
			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
			quirk_xio2000a);
962
963#ifdef CONFIG_X86_IO_APIC
964
965#include <asm/io_apic.h>
966
967
968
969
970
971
972
973
/*
 * VIA 686: configure external APIC routing via config register 0x58.
 * Route all interrupts (0x1f) when I/O APICs are in use, none (0)
 * otherwise.  Re-applied early on resume.
 */
static void quirk_via_ioapic(struct pci_dev *dev)
{
	u8 tmp;

	if (nr_ioapics < 1)
		tmp = 0;	/* nothing routed to external APIC */
	else
		tmp = 0x1f;	/* all known bits (4-0) routed to external APIC */

	pci_info(dev, "%sbling VIA external APIC routing\n",
		 tmp == 0 ? "Disa" : "Ena");

	/* Offset 0x58: External APIC IRQ output control */
	pci_write_config_byte(dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
991
992
993
994
995
996
997
/*
 * VIA VT8237: set the "bypass APIC de-assert message" bit (bit 3 of
 * misc control register 0x5B) if it is not already set.  Re-applied
 * early on resume.
 */
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
	u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8

	pci_read_config_byte(dev, 0x5B, &misc_control2);
	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
		pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
/*
 * AMD Viper 7410 I/O APIC, revision >= 0x02: warn that AMD Erratum
 * #22 may apply and suggest booting with "noapic".  Warning only; no
 * hardware state is changed.
 */
static void quirk_amd_ioapic(struct pci_dev *dev)
{
	if (dev->revision >= 0x02) {
		pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
		pci_warn(dev, "        : booting with the \"noapic\" option\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
1029#endif
1030
#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
/*
 * Cavium ThunderX (0xa018): on subsystem 0xa118 the RNM function's
 * SR-IOV link must point back at itself rather than at another
 * function.
 */
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
	/* A subsystem ID of 0xa118 identifies the affected part. */
	if (dev->subsystem_device == 0xa118)
		dev->sriov->link = dev->devfn;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
#endif
1041
1042
1043
1044
1045
/*
 * AMD 8131 bridge, revision <= 0x12: disable setting of the PCI-X
 * maximum memory read byte count on the secondary bus.
 */
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
	if (dev->subordinate && dev->revision <= 0x12) {
		pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
			 dev->revision);
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
1055
1056
1057
1058
1059
1060
1061
1062
/*
 * VIA ACPI device: read the SCI interrupt line from config register
 * 0x42 (low nibble) and use it as the device IRQ, unless it is 0 or
 * the bogus value 2.
 */
static void quirk_via_acpi(struct pci_dev *d)
{
	u8 irq;

	/* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
	pci_read_config_byte(d, 0x42, &irq);
	irq &= 0xf;
	if (irq && (irq != 2))
		d->irq = irq;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
1075
1076
/*
 * Range of device (slot) numbers on bus 0 that sit on the VIA Vlink
 * internal bus; -1 means "no VIA southbridge identified yet" and
 * disables quirk_via_vlink() below.
 */
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;

/*
 * Identify which VIA southbridge is present and record the slot range
 * of its internal (Vlink) devices for quirk_via_vlink().
 */
static void quirk_via_bridge(struct pci_dev *dev)
{
	/* See what bridge we have and find the device ranges */
	switch (dev->device) {
	case PCI_DEVICE_ID_VIA_82C686:
		/*
		 * The 82C686 is a single-function bridge; only its own
		 * slot is on the internal bus.
		 */
		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
		break;
	case PCI_DEVICE_ID_VIA_8237:
	case PCI_DEVICE_ID_VIA_8237A:
		via_vlink_dev_lo = 15;
		break;
	case PCI_DEVICE_ID_VIA_8235:
		via_vlink_dev_lo = 16;
		break;
	case PCI_DEVICE_ID_VIA_8231:
	case PCI_DEVICE_ID_VIA_8233_0:
	case PCI_DEVICE_ID_VIA_8233A:
	case PCI_DEVICE_ID_VIA_8233C_0:
		via_vlink_dev_lo = 17;
		break;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
/*
 * VIA Vlink devices need their PCI_INTERRUPT_LINE register kept in
 * sync with the IRQ the kernel assigned.  Applies only to devices on
 * bus 0 within the slot range recorded by quirk_via_bridge().
 */
static void quirk_via_vlink(struct pci_dev *dev)
{
	u8 irq, new_irq;

	/* No Vlink southbridge identified -> nothing to do. */
	if (via_vlink_dev_lo == -1)
		return;

	new_irq = dev->irq;

	/* Don't quirk interrupts outside the legacy IRQ range */
	if (!new_irq || new_irq > 15)
		return;

	/* Internal device ? */
	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
		return;

	/*
	 * This is an internal Vlink device on a PIC interrupt.  The BIOS
	 * ought to have set this but may not have, so we redo it.
	 */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	if (new_irq != irq) {
		pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
			irq, new_irq);
		udelay(15);	/* unknown if delay really needed */
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
1159
1160
1161
1162
1163
1164
/*
 * VIA 82C598 masquerades as the 82C597: write 0 to register 0xfc to
 * expose the real device ID, then re-read it into dev->device.
 */
static void quirk_vt82c598_id(struct pci_dev *dev)
{
	pci_write_config_byte(dev, 0xfc, 0);
	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
1171
1172
1173
1174
1175
1176
1177
/*
 * CardBus controllers: zero the 16-bit legacy-mode base register so
 * firmware-programmed legacy I/O decoding is disabled.  Re-applied
 * early on resume.
 */
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
1186
1187
1188
1189
1190
1191
1192
1193
1194static void quirk_amd_ordering(struct pci_dev *dev)
1195{
1196 u32 pcic;
1197 pci_read_config_dword(dev, 0x4C, &pcic);
1198 if ((pcic & 6) != 6) {
1199 pcic |= 6;
1200 pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
1201 pci_write_config_dword(dev, 0x4C, pcic);
1202 pci_read_config_dword(dev, 0x84, &pcic);
1203 pcic |= (1 << 23);
1204 pci_write_config_dword(dev, 0x84, pcic);
1205 }
1206}
1207DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1208DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1209
1210
1211
1212
1213
1214
1215
1216
/*
 * Dunord I-3000: BAR 1 as reported by the device is not usable
 * (NOTE(review): presumably undersized - see git history).  Mark it
 * unset and force a 16 MB window (0x0-0xffffff) so the PCI core
 * reassigns it.
 */
static void quirk_dunord(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[1];

	r->flags |= IORESOURCE_UNSET;
	r->start = 0;
	r->end = 0xffffff;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1226
1227
1228
1229
1230
1231
/*
 * Mark these bridges as transparent (subtractive decode) so resource
 * assignment treats them accordingly; they do not report it themselves.
 */
static void quirk_transparent_bridge(struct pci_dev *dev)
{
	dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1238
1239
1240
1241
1242
1243
1244
1245static void quirk_mediagx_master(struct pci_dev *dev)
1246{
1247 u8 reg;
1248
1249 pci_read_config_byte(dev, 0x41, ®);
1250 if (reg & 2) {
1251 reg &= ~2;
1252 pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1253 reg);
1254 pci_write_config_byte(dev, 0x41, reg);
1255 }
1256}
1257DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1258DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1259
1260
1261
1262
1263
1264
1265static void quirk_disable_pxb(struct pci_dev *pdev)
1266{
1267 u16 config;
1268
1269 if (pdev->revision != 0x04)
1270 return;
1271 pci_read_config_word(pdev, 0x40, &config);
1272 if (config & (1<<6)) {
1273 config &= ~(1<<6);
1274 pci_write_config_word(pdev, 0x40, config);
1275 pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
1276 }
1277}
1278DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1279DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1280
/*
 * quirk_amd_ide_mode - switch AMD/ATI SATA controllers from IDE to AHCI
 * class reporting.
 *
 * If the device's subclass byte currently reads 0x01 (IDE), unlock the
 * class registers via bit 0 of config reg 0x40 (NOTE(review):
 * presumably a write-enable latch - confirm with the SB600/SB700
 * register reference), write prog-if = 1 (offset 0x9) and subclass = 6
 * (offset 0xa, SATA), relock, and update the cached dev->class to AHCI.
 */
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
	/* set SB600/SB700/SB800 SATA to AHCI mode */
	u8 tmp;

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
	if (tmp == 0x01) {
		pci_read_config_byte(pdev, 0x40, &tmp);
		pci_write_config_byte(pdev, 0x40, tmp|1);	/* unlock */
		pci_write_config_byte(pdev, 0x9, 1);		/* prog-if */
		pci_write_config_byte(pdev, 0xa, 6);		/* subclass */
		pci_write_config_byte(pdev, 0x40, tmp);		/* relock */

		pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
		pci_info(pdev, "set SATA to AHCI mode\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1306
1307
1308static void quirk_svwks_csb5ide(struct pci_dev *pdev)
1309{
1310 u8 prog;
1311 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1312 if (prog & 5) {
1313 prog &= ~5;
1314 pdev->class &= ~5;
1315 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1316
1317 }
1318}
1319DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1320
1321
1322static void quirk_ide_samemode(struct pci_dev *pdev)
1323{
1324 u8 prog;
1325
1326 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1327
1328 if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
1329 pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
1330 prog &= ~5;
1331 pdev->class &= ~5;
1332 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1333 }
1334}
1335DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1336
1337
/*
 * quirk_no_ata_d3 - keep these IDE controllers out of D3.
 *
 * PCI_DEV_FLAGS_NO_D3 stops the PCI core from putting the device into
 * D3.  Applied by class (legacy IDE) for several vendors.
 * NOTE(review): the vendor-specific reasons are not visible in this
 * file - see git history before extending the list.
 */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
/* Quirk the legacy ATA devices only - match on class, any device ID */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);

DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);

DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1354
1355
1356
1357
1358
/*
 * The Intel 82375 does not report itself as an EISA bridge; override
 * the cached class code so the rest of the kernel treats it as one.
 */
static void quirk_eisa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
/* Set when the host bridge matches a machine whose BIOS hides the SMBus
 * device; consumed by the asus_hides_smbus_lpc*() quirks below. */
static int asus_hides_smbus;

/*
 * Detect machines (matched by host-bridge device ID plus subsystem
 * vendor/device) whose BIOS hides the i801 SMBus controller, and set
 * the asus_hides_smbus flag for the LPC quirks that un-hide it.
 *
 * Note: in every switch below the cases deliberately fall through to
 * the single "asus_hides_smbus = 1" at the end of that switch.
 */
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
			switch (dev->subsystem_device) {
			case 0x8025:
			case 0x8070:
			case 0x8088:
			case 0x1626:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
			switch (dev->subsystem_device) {
			case 0x80b1:
			case 0x80b2:
			case 0x8093:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
			switch (dev->subsystem_device) {
			case 0x8030:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
			switch (dev->subsystem_device) {
			case 0x8070:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
			switch (dev->subsystem_device) {
			case 0x80c9:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
			switch (dev->subsystem_device) {
			case 0x1751:
			case 0x1821:
			case 0x1897:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x184b:
			case 0x186a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x80f2:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
			switch (dev->subsystem_device) {
			case 0x1882:
			case 0x1977:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x088C:
			case 0x0890:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x12bc:
			case 0x12bd:
			case 0x006a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
			switch (dev->subsystem_device) {
			case 0x12bf:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0xC00C:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x0058:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
			switch (dev->subsystem_device) {
			case 0xB16C:
				/* NOTE(review): specific Compaq model;
				 * see git history for which one. */
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
			switch (dev->subsystem_device) {
			case 0x00b8:
			case 0x00b9:
			case 0x00ba:
				/* NOTE(review): matched on the 82801DB
				 * device itself rather than the host
				 * bridge - intentional per the fixup
				 * registration below. */
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
			switch (dev->subsystem_device) {
			case 0x001A:
				asus_hides_smbus = 1;
			}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1524
1525static void asus_hides_smbus_lpc(struct pci_dev *dev)
1526{
1527 u16 val;
1528
1529 if (likely(!asus_hides_smbus))
1530 return;
1531
1532 pci_read_config_word(dev, 0xF2, &val);
1533 if (val & 0x8) {
1534 pci_write_config_word(dev, 0xF2, val & (~0x8));
1535 pci_read_config_word(dev, 0xF2, &val);
1536 if (val & 0x8)
1537 pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1538 val);
1539 else
1540 pci_info(dev, "Enabled i801 SMBus device\n");
1541 }
1542}
1543DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1544DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1545DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1546DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1547DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1548DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1549DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1550DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1551DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1552DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1553DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1554DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1555DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1556DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1557
1558
/* RCBA MMIO mapping shared by the ICH6 suspend/resume quirks below. */
static void __iomem *asus_rcba_base;

/*
 * Suspend-time phase of the ICH6 SMBus un-hide quirk: map the 16 KB
 * RCBA window (base taken from config reg 0xF0, 16 KB aligned) so the
 * resume-early phase can write into it before config access to the
 * device is fully restored.
 */
static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
	u32 rcba;

	if (likely(!asus_hides_smbus))
		return;
	/* A prior mapping should have been torn down by the resume phase */
	WARN_ON(asus_rcba_base);

	pci_read_config_dword(dev, 0xF0, &rcba);
	/* use bits 31:14, 16 kB aligned */
	asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
	if (asus_rcba_base == NULL)
		return;
}
1574
1575static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
1576{
1577 u32 val;
1578
1579 if (likely(!asus_hides_smbus || !asus_rcba_base))
1580 return;
1581
1582
1583 val = readl(asus_rcba_base + 0x3418);
1584
1585
1586 writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
1587}
1588
/*
 * Resume phase: tear down the RCBA mapping created at suspend and log
 * success (the actual register write happened in the resume-early
 * phase above).
 */
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	iounmap(asus_rcba_base);
	asus_rcba_base = NULL;
	pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
}
1598
/*
 * Boot-time variant: at initial enumeration there is no suspend/resume
 * split, so run all three phases back to back.
 */
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
	asus_hides_smbus_lpc_ich6_suspend(dev);
	asus_hides_smbus_lpc_ich6_resume_early(dev);
	asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1609
1610
1611static void quirk_sis_96x_smbus(struct pci_dev *dev)
1612{
1613 u8 val = 0;
1614 pci_read_config_byte(dev, 0x77, &val);
1615 if (val & 0x10) {
1616 pci_info(dev, "Enabling SiS 96x SMBus\n");
1617 pci_write_config_byte(dev, 0x77, val & ~0x10);
1618 }
1619}
1620DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1621DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1622DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1623DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1624DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1625DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1626DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1627DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637#define SIS_DETECT_REGISTER 0x40
1638
1639static void quirk_sis_503(struct pci_dev *dev)
1640{
1641 u8 reg;
1642 u16 devid;
1643
1644 pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®);
1645 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1646 pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1647 if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
1648 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1649 return;
1650 }
1651
1652
1653
1654
1655
1656
1657 dev->device = devid;
1658 quirk_sis_96x_smbus(dev);
1659}
1660DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1661DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1662
1663
1664
1665
1666
1667
1668
1669static void asus_hides_ac97_lpc(struct pci_dev *dev)
1670{
1671 u8 val;
1672 int asus_hides_ac97 = 0;
1673
1674 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1675 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1676 asus_hides_ac97 = 1;
1677 }
1678
1679 if (!asus_hides_ac97)
1680 return;
1681
1682 pci_read_config_byte(dev, 0x50, &val);
1683 if (val & 0xc0) {
1684 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1685 pci_read_config_byte(dev, 0x50, &val);
1686 if (val & 0xc0)
1687 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1688 val);
1689 else
1690 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1691 }
1692}
1693DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1694DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1695
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
 * quirk_jmicron_ata - program JMicron AHCI/IDE combo controllers into
 * the mode libata expects.
 *
 * Rewrites the chip control registers at config offsets 0x40 (conf1)
 * and 0x80 (conf5) with device-specific bit patterns, then re-reads the
 * header type and class code, since the register writes can change how
 * the device (and its second function) present themselves.  Runs on
 * function 0 only; it controls function 1 as well.
 *
 * NOTE(review): the magic masks/values are preserved as-is from the
 * original code - consult the JMicron register documentation before
 * changing any of them.
 */
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
	u32 conf1, conf5, class;
	u8 hdr;

	/* Only poke fn 0 */
	if (PCI_FUNC(pdev->devfn))
		return;

	pci_read_config_dword(pdev, 0x40, &conf1);
	pci_read_config_dword(pdev, 0x80, &conf5);

	conf1 &= ~0x00CFF302;	/* clear the mode bits we manage */
	conf5 &= ~(1 << 24);	/* clear the PATA-enable bit */

	switch (pdev->device) {
	case PCI_DEVICE_ID_JMICRON_JMB360:
	case PCI_DEVICE_ID_JMICRON_JMB362:
	case PCI_DEVICE_ID_JMICRON_JMB364:
		/* SATA-only parts */
		conf1 |= 0x0002A100;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB365:
	case PCI_DEVICE_ID_JMICRON_JMB366:
		/* dual-PATA parts: also set conf5 bit 24, then share the
		 * SATA+PATA setup below */
		conf5 |= (1 << 24);
		fallthrough;
	case PCI_DEVICE_ID_JMICRON_JMB361:
	case PCI_DEVICE_ID_JMICRON_JMB363:
	case PCI_DEVICE_ID_JMICRON_JMB369:
		/* SATA + PATA combo parts */
		conf1 |= 0x00C2A1B3;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB368:
		/* PATA-only part */
		conf1 |= 0x00C00000;
		break;
	}

	pci_write_config_dword(pdev, 0x40, conf1);
	pci_write_config_dword(pdev, 0x80, conf5);

	/* Update pdev accordingly - the writes may have changed these */
	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
	pdev->hdr_type = hdr & 0x7f;
	pdev->multifunction = !!(hdr & 0x80);

	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
	pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);

#endif
1775
1776static void quirk_jmicron_async_suspend(struct pci_dev *dev)
1777{
1778 if (dev->multifunction) {
1779 device_disable_async_suspend(&dev->dev);
1780 pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
1781 }
1782}
1783DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
1784DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
1785DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
1786DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
1787
#ifdef CONFIG_X86_IO_APIC
/*
 * Intel EESSC ("Alder") fixup: reserve BAR 0 in the iomem tree
 * (presumably the IOAPIC window, per the quirk name - not verifiable
 * here) and wipe the remaining BARs so the PCI core never tries to
 * assign or move them.  Only applies when the class code is 0xff00.
 */
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
	int i;

	if ((pdev->class >> 8) != 0xff00)
		return;

	/* Reserve BAR 0 if it is populated */
	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
		insert_resource(&iomem_resource, &pdev->resource[0]);

	/* Clear all other BARs so they are never (re)assigned */
	for (i = 1; i < PCI_STD_NUM_BARS; i++)
		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
1813
/*
 * Disable MSI on these chipset devices (and, via the class match below,
 * on the Huawei 0x1610 PCI bridge).
 */
static void quirk_pcie_mch(struct pci_dev *pdev)
{
	pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);

DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
1837{
1838 struct property_entry properties[] = {
1839 PROPERTY_ENTRY_BOOL("dma-can-stall"),
1840 {},
1841 };
1842
1843 if (pdev->revision != 0x21 && pdev->revision != 0x30)
1844 return;
1845
1846 pdev->pasid_no_tlp = 1;
1847
1848
1849
1850
1851
1852 if (!pdev->dev.of_node &&
1853 device_add_properties(&pdev->dev, properties))
1854 pci_warn(pdev, "could not add stall property");
1855}
1856DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
1857DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
1858DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
1859DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
1860DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
1861DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
1862
1863
1864
1865
1866
/*
 * Disable MSI on Intel PXH/PXHD/PXHV bridges; per the warning text this
 * protects the SHPC hotplug controller on these parts.
 */
static void quirk_pcie_pxh(struct pci_dev *dev)
{
	dev->no_msi = 1;
	pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
1877
1878
1879
1880
1881
/*
 * These Intel PCIe ports need a 120 ms D3hot settle delay and must not
 * use D1/D2.  Note: pci_pm_d3hot_delay is a file-global used by the PCI
 * PM core, so this raises the delay for the whole system, not just this
 * device.
 */
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
	pci_pm_d3hot_delay = 120;
	dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1908
1909static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
1910{
1911 if (dev->d3hot_delay >= delay)
1912 return;
1913
1914 dev->d3hot_delay = delay;
1915 pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
1916 dev->d3hot_delay);
1917}
1918
1919static void quirk_radeon_pm(struct pci_dev *dev)
1920{
1921 if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1922 dev->subsystem_device == 0x00e2)
1923 quirk_d3hot_delay(dev, 20);
1924}
1925DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
/* These AMD xHCI controllers need a 20 msec D3hot delay. */
static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
{
	quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
1943
#ifdef CONFIG_X86_IO_APIC
/* DMI callback: set the global noioapicreroute flag for listed boards. */
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
	noioapicreroute = 1;
	pr_info("%s detected: disable boot interrupt reroute\n", d->ident);

	return 0;
}
1952
/*
 * Boards on which boot-interrupt rerouting must be disabled; checked by
 * quirk_reroute_to_boot_interrupts_intel() below.
 */
static const struct dmi_system_id boot_interrupt_dmi_table[] = {
	{
		.callback = dmi_disable_ioapicreroute,
		.ident = "ASUSTek Computer INC. M2N-LR",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
		},
	},
	{}
};
1967
1968
1969
1970
1971
1972
1973
1974static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
1975{
1976 dmi_check_system(boot_interrupt_dmi_table);
1977 if (noioapicquirk || noioapicreroute)
1978 return;
1979
1980 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
1981 pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
1982 dev->vendor, dev->device);
1983}
1984DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
1985DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
1986DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
1987DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
1988DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
1989DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
1990DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
1991DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1992DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
1993DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
1994DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
1995DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
1996DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
1997DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
1998DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
1999DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
#define INTEL_6300_IOAPIC_ABAR 0x40
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)

#define INTEL_CIPINTRC_CFG_OFFSET 0x14C
#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)

/*
 * quirk_disable_intel_boot_interrupt - turn off boot interrupt
 * generation on Intel bridges.
 *
 * Two mechanisms depending on the device: the ESB 6300 sets a bit in
 * the IOAPIC ABAR word; the later parts (device IDs in the switch) set
 * the DIS_INTX_ICH bit in the CIPINTRC config dword.  Unknown devices
 * are left untouched.  Disabled globally via noioapicquirk.
 */
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;
	u32 pci_config_dword;

	if (noioapicquirk)
		return;

	switch (dev->device) {
	case PCI_DEVICE_ID_INTEL_ESB_10:
		pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
				     &pci_config_word);
		pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
		pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
				      pci_config_word);
		break;
	case 0x3c28:
	case 0x0e28:
	case 0x2f28:
	case 0x6f28:
	case 0x2034:
		pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
				      &pci_config_dword);
		pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
		pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
				       pci_config_dword);
		break;
	default:
		return;
	}
	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
/* ESB 6300 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
			quirk_disable_intel_boot_interrupt);

/* Devices using the CIPINTRC mechanism */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
			quirk_disable_intel_boot_interrupt);
2095
2096
#define BC_HT1000_FEATURE_REG 0x64
#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
#define BC_HT1000_MAP_IDX 0xC00
#define BC_HT1000_MAP_DATA 0xC01

/*
 * quirk_disable_broadcom_boot_interrupt - disable boot interrupts on
 * the Serverworks/Broadcom HT1000 south bridge.
 *
 * Temporarily exposes the PIC index/data register pair (feature-reg bit
 * 0), writes 0 to each of the 32 map entries starting at index 0x10,
 * then restores the feature register.  Disabled globally via
 * noioapicquirk.  NOTE(review): map entry semantics taken from the
 * register names - confirm against the HT1000 documentation.
 */
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;
	u8 irq;

	if (noioapicquirk)
		return;

	/* Enable access to the PIC index/data I/O ports */
	pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
			BC_HT1000_PIC_REGS_ENABLE);

	/* Zero all 32 map entries */
	for (irq = 0x10; irq < 0x10 + 32; irq++) {
		outb(irq, BC_HT1000_MAP_IDX);
		outb(0x00, BC_HT1000_MAP_DATA);
	}

	/* Restore the original feature-register value */
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
2126
2127
2128
2129
2130
2131
2132
2133
2134#define AMD_813X_MISC 0x40
2135#define AMD_813X_NOIOAMODE (1<<0)
2136#define AMD_813X_REV_B1 0x12
2137#define AMD_813X_REV_B2 0x13
2138
2139static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
2140{
2141 u32 pci_config_dword;
2142
2143 if (noioapicquirk)
2144 return;
2145 if ((dev->revision == AMD_813X_REV_B1) ||
2146 (dev->revision == AMD_813X_REV_B2))
2147 return;
2148
2149 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
2150 pci_config_dword &= ~AMD_813X_NOIOAMODE;
2151 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
2152
2153 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2154 dev->vendor, dev->device);
2155}
2156DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2157DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2158DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2159DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2160
2161#define AMD_8111_PCI_IRQ_ROUTING 0x56
2162
2163static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
2164{
2165 u16 pci_config_word;
2166
2167 if (noioapicquirk)
2168 return;
2169
2170 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
2171 if (!pci_config_word) {
2172 pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
2173 dev->vendor, dev->device);
2174 return;
2175 }
2176 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
2177 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2178 dev->vendor, dev->device);
2179}
2180DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2181DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2182#endif
2183
2184
2185
2186
2187
2188
2189static void quirk_tc86c001_ide(struct pci_dev *dev)
2190{
2191 struct resource *r = &dev->resource[0];
2192
2193 if (r->start & 0x8) {
2194 r->flags |= IORESOURCE_UNSET;
2195 r->start = 0;
2196 r->end = 0xf;
2197 }
2198}
2199DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
2200 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
2201 quirk_tc86c001_ide);
2202
2203
2204
2205
2206
2207
2208
2209
2210static void quirk_plx_pci9050(struct pci_dev *dev)
2211{
2212 unsigned int bar;
2213
2214
2215 if (dev->revision >= 2)
2216 return;
2217 for (bar = 0; bar <= 1; bar++)
2218 if (pci_resource_len(dev, bar) == 0x80 &&
2219 (pci_resource_start(dev, bar) & 0x80)) {
2220 struct resource *r = &dev->resource[bar];
2221 pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
2222 bar);
2223 r->flags |= IORESOURCE_UNSET;
2224 r->start = 0;
2225 r->end = 0xff;
2226 }
2227}
2228DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2229 quirk_plx_pci9050);
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
2240DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
2241
/*
 * Netmos multi-port cards encode their port counts in the subsystem
 * device ID: low nibble = serial ports, next nibble = parallel ports
 * (see the extraction below and the log format string).  Cards with at
 * least one parallel port must be driven by parport_serial, so their
 * class is changed from SERIAL to COMMUNICATION_OTHER.
 */
static void quirk_netmos(struct pci_dev *dev)
{
	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
	unsigned int num_serial = dev->subsystem_device & 0xf;

	switch (dev->device) {
	case PCI_DEVICE_ID_NETMOS_9835:
		/*
		 * The IBM 0299 subsystem is exempted -- NOTE(review): the
		 * rationale is not visible here; presumably its subsystem
		 * ID does not follow the port-count encoding.
		 */
		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
				dev->subsystem_device == 0x0299)
			return;
		fallthrough;
	case PCI_DEVICE_ID_NETMOS_9735:
	case PCI_DEVICE_ID_NETMOS_9745:
	case PCI_DEVICE_ID_NETMOS_9845:
	case PCI_DEVICE_ID_NETMOS_9855:
		if (num_parallel) {
			pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
				dev->device, num_parallel, num_serial);
			/* Keep the programming interface byte intact */
			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
			    (dev->class & 0xff);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
2278
/*
 * Some firmware hands off e100 NICs with interrupts already unmasked.
 * If no driver attaches, such a device can keep an IRQ line asserted, so
 * mask the interrupt through the device's memory-mapped CSR here.
 */
static void quirk_e100_interrupt(struct pci_dev *dev)
{
	u16 command, pmcsr;
	u8 __iomem *csr;
	u8 cmd_hi;

	switch (dev->device) {
	/* Device IDs of the affected e100 family parts */
	case 0x1029:
	case 0x1030 ... 0x1034:
	case 0x1038 ... 0x103E:
	case 0x1050 ... 0x1057:
	case 0x1059:
	case 0x1064 ... 0x106B:
	case 0x1091 ... 0x1095:
	case 0x1209:
	case 0x1229:
	case 0x2449:
	case 0x2459:
	case 0x245D:
	case 0x27DC:
		break;
	default:
		return;
	}

	/*
	 * The CSR is only reachable when memory decoding is enabled and
	 * BAR 0 has actually been assigned.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &command);

	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
		return;

	/*
	 * Config space is accessible in low-power states, but the
	 * memory-mapped CSR is not -- only touch it in D0.
	 */
	if (dev->pm_cap) {
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
			return;
	}

	/* Map just the first 8 bytes of the CSR */
	csr = ioremap(pci_resource_start(dev, 0), 8);
	if (!csr) {
		pci_warn(dev, "Can't map e100 registers\n");
		return;
	}

	/* Byte 3 of the command word: 0 means interrupts are unmasked */
	cmd_hi = readb(csr + 3);
	if (cmd_hi == 0) {
		pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
		writeb(1, csr + 3);
	}

	iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
2344
2345
2346
2347
2348
/*
 * Disable ASPM L0s on Intel NICs that misbehave with it enabled.
 * NOTE(review): the specific erratum is not visible here -- the device
 * IDs below are 82571/82574-era Ethernet parts, confirm against the
 * corresponding specification updates.
 */
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
	pci_info(dev, "Disabling L0s\n");
	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2368
/* Disable both ASPM L0s and L1 on a device. */
static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
{
	pci_info(dev, "Disabling ASPM L0s/L1\n");
	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}

/*
 * ASMedia 0x1080: ASPM must stay off.  NOTE(review): the failure mode is
 * not visible here -- confirm against the original quirk commit.
 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
2381
2382
2383
2384
2385
2386
2387
2388
2389
/*
 * Set dev->clear_retrain_link so the link-retraining code explicitly
 * clears the Retrain Link bit on these Pericom bridges (presumably they
 * do not self-clear it -- NOTE(review): consumer of the flag lives in
 * the ASPM/link code, confirm there).
 */
static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
{
	dev->clear_retrain_link = 1;
	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
2398
2399static void fixup_rev1_53c810(struct pci_dev *dev)
2400{
2401 u32 class = dev->class;
2402
2403
2404
2405
2406
2407 if (class)
2408 return;
2409
2410 dev->class = PCI_CLASS_STORAGE_SCSI << 8;
2411 pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
2412 class, dev->class);
2413}
2414DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
2415
2416
2417static void quirk_p64h2_1k_io(struct pci_dev *dev)
2418{
2419 u16 en1k;
2420
2421 pci_read_config_word(dev, 0x40, &en1k);
2422
2423 if (en1k & 0x200) {
2424 pci_info(dev, "Enable I/O Space to 1KB granularity\n");
2425 dev->io_window_1k = 1;
2426 }
2427}
2428DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2429
2430
2431
2432
2433
2434
2435static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
2436{
2437 uint8_t b;
2438
2439 if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
2440 if (!(b & 0x20)) {
2441 pci_write_config_byte(dev, 0xf41, b | 0x20);
2442 pci_info(dev, "Linking AER extended capability\n");
2443 }
2444 }
2445}
2446DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2447 quirk_nvidia_ck804_pcie_aer_ext_cap);
2448DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2449 quirk_nvidia_ck804_pcie_aer_ext_cap);
2450
/*
 * VIA CX700: disable PCI bus parking and PCI master caching, but only on
 * systems that also carry a second (external) VT6212L USB controller.
 * NOTE(review): the exact register semantics (0x72/0x75/0x76/0x77) are
 * not documented here -- confirm against the CX700 datasheet.
 */
static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
	/*
	 * p holds the first (internal) VT6212L, if any.  pci_get_device()
	 * drops the reference on its 'from' argument, so the second search
	 * below does not leak the first result -- it looks for an
	 * additional VT6212L beyond the internal one.
	 */
	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
		PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
	uint8_t b;

	/* Only apply when a second VT6212L exists */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
	if (!p)
		return;
	pci_dev_put(p);

	if (pci_read_config_byte(dev, 0x76, &b) == 0) {
		if (b & 0x40) {
			/* Clear the parking-enable bit */
			pci_write_config_byte(dev, 0x76, b ^ 0x40);

			pci_info(dev, "Disabling VIA CX700 PCI parking\n");
		}
	}

	if (pci_read_config_byte(dev, 0x72, &b) == 0) {
		if (b != 0) {
			/* Turn off caching */
			pci_write_config_byte(dev, 0x72, 0x0);

			/* Set 0x75 to 1 -- NOTE(review): meaning unclear */
			pci_write_config_byte(dev, 0x75, 0x1);

			/* Clear 0x77 */
			pci_write_config_byte(dev, 0x77, 0x0);

			pci_info(dev, "Disabling VIA CX700 PCI caching\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2502
2503static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2504{
2505 u32 rev;
2506
2507 pci_read_config_dword(dev, 0xf4, &rev);
2508
2509
2510 if (rev == 0x05719000) {
2511 int readrq = pcie_get_readrq(dev);
2512 if (readrq > 2048)
2513 pcie_set_readrq(dev, 2048);
2514 }
2515}
2516DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2517 PCI_DEVICE_ID_TIGON3_5719,
2518 quirk_brcm_5719_limit_mrrs);
2519
2520
2521
2522
2523
2524
2525
2526static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2527{
2528 u8 reg;
2529
2530 if (pci_read_config_byte(dev, 0xF4, ®) == 0 && !(reg & 0x02)) {
2531 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2532 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2533 }
2534}
2535DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2536 quirk_unhide_mch_dev6);
2537DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2538 quirk_unhide_mch_dev6);
2539
2540#ifdef CONFIG_PCI_MSI
2541
2542
2543
2544
2545
2546
2547
/*
 * Chipsets whose MSI support is broken badly enough that MSI must be
 * turned off system-wide (pci_no_msi() is global, not per-device).
 */
static void quirk_disable_all_msi(struct pci_dev *dev)
{
	pci_no_msi();
	pci_warn(dev, "MSI quirk detected; MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SAMSUNG, 0xa5e3, quirk_disable_all_msi);
2562
2563
/* Disable MSI for every device on the bus below this bridge. */
static void quirk_disable_msi(struct pci_dev *dev)
{
	if (dev->subordinate) {
		pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2574
2575
2576
2577
2578
2579
2580
2581static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
2582{
2583 struct pci_dev *apc_bridge;
2584
2585 apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
2586 if (apc_bridge) {
2587 if (apc_bridge->device == 0x9602)
2588 quirk_disable_msi(apc_bridge);
2589 pci_dev_put(apc_bridge);
2590 }
2591}
2592DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
2593DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2594
2595
2596
2597
2598
/*
 * Return 1 if the device's HT MSI mapping capability is enabled, 0
 * otherwise.  Walks the HT capability chain (bounded by PCI_FIND_CAP_TTL)
 * but returns based on the first capability whose flags byte reads OK.
 */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Found %s HT MSI Mapping\n",
				flags & HT_MSI_FLAGS_ENABLE ?
				"enabled" : "disabled");
			return (flags & HT_MSI_FLAGS_ENABLE) != 0;
		}

		/* Flags unreadable: try the next mapping capability */
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return 0;
}
2620
2621
/* If the bridge's HT MSI mapping is off, disable MSI below it. */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
	if (msi_ht_cap_enabled(dev))
		return;

	quirk_disable_msi(dev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
			quirk_msi_ht_cap);
2629
2630
2631
2632
2633
/*
 * nVidia CK804: the HT MSI mapping may live on the device at devfn 00.0
 * of the same bus rather than on this bridge.  Check that root device
 * first; only when its mapping is disabled too, apply the generic
 * quirk_msi_ht_cap() to this bridge.
 */
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
	struct pci_dev *pdev;

	/* Device 0, function 0 on the same bus */
	pdev = pci_get_slot(dev->bus, 0);
	if (!pdev)
		return;
	if (!msi_ht_cap_enabled(pdev))
		quirk_msi_ht_cap(dev);
	pci_dev_put(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_msi_ht_cap);
2651
2652
/* Set the enable bit in every HT MSI mapping capability of the device. */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;	/* ttl guards malformed chains */

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Enabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags | HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
			 ht_enable_msi_mapping);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
			 ht_enable_msi_mapping);
2677
2678
2679
2680
2681
2682
2683static void nvenet_msi_disable(struct pci_dev *dev)
2684{
2685 const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
2686
2687 if (board_name &&
2688 (strstr(board_name, "P5N32-SLI PREMIUM") ||
2689 strstr(board_name, "P5N32-E SLI"))) {
2690 pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
2691 dev->no_msi = 1;
2692 }
2693}
2694DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2695 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2696 nvenet_msi_disable);
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
/*
 * Mark NVIDIA Tegra PCIe root ports (class BRIDGE_PCI) as not
 * MSI-capable.  NOTE(review): this sets no_msi on the root port itself
 * only; whether endpoints below it may still use MSI is decided by the
 * core -- confirm against the Tegra PCIe host driver.
 */
static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2762{
2763 u32 cfg;
2764
2765 if (!pci_find_capability(dev, PCI_CAP_ID_HT))
2766 return;
2767
2768 pci_read_config_dword(dev, 0x74, &cfg);
2769
2770 if (cfg & ((1 << 2) | (1 << 15))) {
2771 pr_info("Rewriting IRQ routing register on MCP55\n");
2772 cfg &= ~((1 << 2) | (1 << 15));
2773 pci_write_config_dword(dev, 0x74, cfg);
2774 }
2775}
2776DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2777 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
2778 nvbridge_check_legacy_irq_routing);
2779DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2780 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
2781 nvbridge_check_legacy_irq_routing);
2782
/*
 * Classify the device's HT MSI mapping state:
 *   0 - no HT MSI mapping capability found
 *   1 - capability present but not enabled
 *   2 - at least one capability present and enabled
 */
static int ht_check_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;
	int found = 0;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		/* A capability exists at all -> at least "present" */
		if (found < 1)
			found = 1;
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			if (flags & HT_MSI_FLAGS_ENABLE) {
				if (found < 2) {
					found = 2;
					break;
				}
			}
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}

	return found;
}
2810
/*
 * Return 1 if a device after this host bridge (higher device number on
 * the same bus) has an HT MSI mapping capability.  The scan stops at the
 * first HT slave encountered -- NOTE(review): presumably that device
 * starts the next HT chain, so later devices don't belong to this
 * bridge; confirm against the HT topology rules.
 */
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
	struct pci_dev *dev;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = host_bridge->devfn >> 3;
	for (i = dev_no + 1; i < 0x20; i++) {
		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
		if (!dev)
			continue;

		/* An HT slave ends this bridge's span of devices */
		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			pci_dev_put(dev);
			break;
		}

		if (ht_check_msi_mapping(dev)) {
			found = 1;
			pci_dev_put(dev);
			break;
		}
		pci_dev_put(dev);
	}

	return found;
}
2841
2842#define PCI_HT_CAP_SLAVE_CTRL0 4
2843#define PCI_HT_CAP_SLAVE_CTRL1 8
2844
2845static int is_end_of_ht_chain(struct pci_dev *dev)
2846{
2847 int pos, ctrl_off;
2848 int end = 0;
2849 u16 flags, ctrl;
2850
2851 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2852
2853 if (!pos)
2854 goto out;
2855
2856 pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
2857
2858 ctrl_off = ((flags >> 10) & 1) ?
2859 PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
2860 pci_read_config_word(dev, pos + ctrl_off, &ctrl);
2861
2862 if (ctrl & (1 << 6))
2863 end = 1;
2864
2865out:
2866 return end;
2867}
2868
/*
 * Enable the HT MSI mapping on a device, but only after locating the HT
 * slave "host bridge" at or below its device number on the same bus, and
 * only if that bridge is not an end-of-chain bridge with a mapped leaf
 * and does not already have its own mapping enabled.
 */
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
	struct pci_dev *host_bridge;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = dev->devfn >> 3;
	for (i = dev_no; i >= 0; i--) {
		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
		if (!host_bridge)
			continue;

		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			/* Reference intentionally kept; dropped at 'out' */
			found = 1;
			break;
		}
		pci_dev_put(host_bridge);
	}

	if (!found)
		return;

	/* Don't enable end-of-chain bridges that already have a mapped leaf */
	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
	    host_bridge_with_leaf(host_bridge))
		goto out;

	/* Nothing to do if the bridge's own mapping is already on */
	if (msi_ht_cap_enabled(host_bridge))
		goto out;

	ht_enable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2907
/* Clear the enable bit in every HT MSI mapping capability of the device. */
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;	/* ttl guards malformed chains */

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Disabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags & ~HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
2927
/*
 * Fix up a device's HT MSI mapping based on whether the host bridge at
 * devfn 00.0 of its domain is an HT slave:
 *  - HT host bridge: enable the mapping if present-but-disabled ('all'
 *    chooses unconditional enable vs. the chain-aware nv variant);
 *  - non-HT host bridge: disable the mapping if it is enabled (leave
 *    already-disabled mappings alone).
 * Does nothing when MSI is globally off or the device has no mapping.
 */
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
	struct pci_dev *host_bridge;
	int pos;
	int found;

	if (!pci_msi_enabled())
		return;

	/* 0 = none, 1 = present, 2 = present and enabled */
	found = ht_check_msi_mapping(dev);

	if (found == 0)
		return;

	/* Locate the domain's host bridge at 00:00.0 */
	host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
						  PCI_DEVFN(0, 0));
	if (host_bridge == NULL) {
		pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
		return;
	}

	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
	if (pos != 0) {
		/* HT host bridge: enable a present-but-disabled mapping */
		if (found == 1) {
			if (all)
				ht_enable_msi_mapping(dev);
			else
				nv_ht_enable_msi_mapping(dev);
		}
		goto out;
	}

	/* Non-HT host bridge and the mapping isn't enabled: nothing to do */
	if (found == 1)
		goto out;

	/* Non-HT host bridge with an enabled mapping: turn it off */
	ht_disable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2978
/*
 * HT MSI mapping quirk, "all" mode (unconditional enable when the host
 * bridge is HT).  Fix: the original used "return <expr>;" in a function
 * returning void, which ISO C forbids (C11 6.8.6.4); call the helper
 * plainly instead -- behavior is unchanged.
 */
static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
	__nv_msi_ht_cap_quirk(dev, 1);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2985
/*
 * HT MSI mapping quirk, leaf mode (chain-aware enable via
 * nv_ht_enable_msi_mapping()).  Fix: drop the ISO-C-invalid
 * "return <expr>;" from a void function (C11 6.8.6.4); behavior is
 * unchanged.
 */
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
	__nv_msi_ht_cap_quirk(dev, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2992
/*
 * Flag devices whose INTx-disable (command register) handling is broken
 * when MSI is in use; the MSI core checks this flag.  NOTE(review): the
 * exact symptom is device-specific and not visible here.
 */
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
2997
2998static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
2999{
3000 struct pci_dev *p;
3001
3002
3003
3004
3005
3006
3007 p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
3008 NULL);
3009 if (!p)
3010 return;
3011
3012 if ((p->revision < 0x3B) && (p->revision >= 0x30))
3013 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
3014 pci_dev_put(p);
3015}
3016
3017static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
3018{
3019
3020 if (dev->revision < 0x18) {
3021 pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
3022 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
3023 }
3024}
3025DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3026 PCI_DEVICE_ID_TIGON3_5780,
3027 quirk_msi_intx_disable_bug);
3028DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3029 PCI_DEVICE_ID_TIGON3_5780S,
3030 quirk_msi_intx_disable_bug);
3031DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3032 PCI_DEVICE_ID_TIGON3_5714,
3033 quirk_msi_intx_disable_bug);
3034DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3035 PCI_DEVICE_ID_TIGON3_5714S,
3036 quirk_msi_intx_disable_bug);
3037DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3038 PCI_DEVICE_ID_TIGON3_5715,
3039 quirk_msi_intx_disable_bug);
3040DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3041 PCI_DEVICE_ID_TIGON3_5715S,
3042 quirk_msi_intx_disable_bug);
3043
3044DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
3045 quirk_msi_intx_disable_ati_bug);
3046DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
3047 quirk_msi_intx_disable_ati_bug);
3048DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
3049 quirk_msi_intx_disable_ati_bug);
3050DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
3051 quirk_msi_intx_disable_ati_bug);
3052DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
3053 quirk_msi_intx_disable_ati_bug);
3054
3055DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
3056 quirk_msi_intx_disable_bug);
3057DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
3058 quirk_msi_intx_disable_bug);
3059DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
3060 quirk_msi_intx_disable_bug);
3061
3062DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
3063 quirk_msi_intx_disable_bug);
3064DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
3065 quirk_msi_intx_disable_bug);
3066DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
3067 quirk_msi_intx_disable_bug);
3068DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
3069 quirk_msi_intx_disable_bug);
3070DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
3071 quirk_msi_intx_disable_bug);
3072DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
3073 quirk_msi_intx_disable_bug);
3074DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
3075 quirk_msi_intx_disable_qca_bug);
3076DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
3077 quirk_msi_intx_disable_qca_bug);
3078DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
3079 quirk_msi_intx_disable_qca_bug);
3080DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
3081 quirk_msi_intx_disable_qca_bug);
3082DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
3083 quirk_msi_intx_disable_qca_bug);
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
/*
 * Amazon Annapurna Labs root port 0x0031: MSI/MSI-X must not be used on
 * the bridge itself.  NOTE(review): the underlying erratum is not
 * visible here -- see the quirk's original commit message.
 */
static void quirk_al_msi_disable(struct pci_dev *dev)
{
	dev->no_msi = 1;
	pci_warn(dev, "Disabling MSI/MSI-X\n");
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable);
3102#endif
3103
3104
3105
3106
3107
3108
3109
3110
/*
 * Mark the bridge as a hotplug bridge; the flag is consumed elsewhere in
 * the PCI core (presumably when sizing bridge windows for future hotplug
 * -- NOTE(review): confirm the consumer).
 */
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
	dev->is_hotplug_bridge = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142#ifdef CONFIG_MMC_RICOH_MMC
/*
 * Disable the proprietary Ricoh MMC controller function so MMC cards are
 * handled by the standard SDHCI function instead.
 */
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
	u8 write_enable;
	u8 write_target;
	u8 disable;

	/* The disable bit lives on function 0 (the CardBus controller) */
	if (PCI_FUNC(dev->devfn))
		return;

	pci_read_config_byte(dev, 0xB7, &disable);
	if (disable & 0x02)
		return;	/* already disabled */

	/*
	 * Register 0xB7 is write-protected: unlock it by pointing the
	 * write-target register (0x8D) at it and writing 0xAA to the
	 * write-enable register (0x8E); restore both afterwards.
	 * NOTE(review): register roles inferred from the access pattern --
	 * confirm against the RL5c476 documentation.
	 */
	pci_read_config_byte(dev, 0x8E, &write_enable);
	pci_write_config_byte(dev, 0x8E, 0xAA);
	pci_read_config_byte(dev, 0x8D, &write_target);
	pci_write_config_byte(dev, 0x8D, 0xB7);
	pci_write_config_byte(dev, 0xB7, disable | 0x02);
	pci_write_config_byte(dev, 0x8E, write_enable);
	pci_write_config_byte(dev, 0x8D, write_target);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
3174
/*
 * Disable the proprietary Ricoh MMC controller function (here hanging
 * off the FireWire function) so MMC cards go through the standard SDHCI
 * function; for R5CE822/R5CE823 also raise the SD base clock to 50 MHz.
 */
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
	u8 write_enable;
	u8 disable;

	/* The disable bit lives on function 0 */
	if (PCI_FUNC(dev->devfn))
		return;

	/*
	 * R5CE822/R5CE823 base-frequency change to 50 MHz (per the notice
	 * below).  NOTE(review): individual register meanings (0xf9, 0x150,
	 * 0xfc, 0xe1) are not documented here -- verify against Ricoh
	 * documentation before changing the sequence or its order.
	 */
	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
		pci_write_config_byte(dev, 0xf9, 0xfc);
		pci_write_config_byte(dev, 0x150, 0x10);
		pci_write_config_byte(dev, 0xf9, 0x00);
		pci_write_config_byte(dev, 0xfc, 0x01);
		pci_write_config_byte(dev, 0xe1, 0x32);
		pci_write_config_byte(dev, 0xfc, 0x00);

		pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n");
	}

	pci_read_config_byte(dev, 0xCB, &disable);

	if (disable & 0x02)
		return;	/* already disabled */

	/* Unlock 0xCB via write-enable register 0xCA, then restore it */
	pci_read_config_byte(dev, 0xCA, &write_enable);
	pci_write_config_byte(dev, 0xCA, 0x57);
	pci_write_config_byte(dev, 0xCB, disable | 0x02);
	pci_write_config_byte(dev, 0xCA, write_enable);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
3231#endif
3232
3233#ifdef CONFIG_DMAR_TABLE
3234#define VTUNCERRMSK_REG 0x1ac
3235#define VTD_MSK_SPEC_ERRORS (1 << 31)
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246static void vtd_mask_spec_errors(struct pci_dev *dev)
3247{
3248 u32 word;
3249
3250 pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
3251 pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
3252}
3253DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
3254DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
3255#endif
3256
3257static void fixup_ti816x_class(struct pci_dev *dev)
3258{
3259 u32 class = dev->class;
3260
3261
3262 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
3263 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
3264 class, dev->class);
3265}
3266DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
3267 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
3268
3269
3270
3271
3272
/*
 * Force the supported Max Payload Size to 256 bytes: pcie_mpss holds the
 * encoded MPSS value, and 1 encodes 256 bytes (128 << 1) per the PCIe
 * Device Capabilities encoding -- overriding what the device advertises.
 */
static void fixup_mpss_256(struct pci_dev *dev)
{
	dev->pcie_mpss = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
			PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
			PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
			PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
3284
3285
3286
3287
3288
3289
3290
3291
3292
/*
 * Work around a hardware erratum on the Intel memory-controller hubs listed
 * below: when 256B MPS is in use, read-completion coalescing (bit 10 of
 * config register 0x48) must be disabled.  Skipped when the PCIe bus MPS
 * tuning is off or left at the default, since the erratum only matters for
 * the tuned configurations.
 */
static void quirk_intel_mc_errata(struct pci_dev *dev)
{
	int err;
	u16 rcc;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return;

	/*
	 * Read the "read completion coalescing" control word at 0x48.
	 * NOTE(review): register offset/bit are magic values taken as-is;
	 * confirm against the Intel errata documentation.
	 */
	err = pci_read_config_word(dev, 0x48, &rcc);
	if (err) {
		pci_err(dev, "Error attempting to read the read completion coalescing register\n");
		return;
	}

	/* Already disabled — nothing to do. */
	if (!(rcc & (1 << 10)))
		return;

	rcc &= ~(1 << 10);

	err = pci_write_config_word(dev, 0x48, rcc);
	if (err) {
		pci_err(dev, "Error attempting to write the read completion coalescing register\n");
		return;
	}

	pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
}
/* Intel MCH device IDs subject to the erratum. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3353
3354
3355
3356
3357
3358
3359static void quirk_intel_ntb(struct pci_dev *dev)
3360{
3361 int rc;
3362 u8 val;
3363
3364 rc = pci_read_config_byte(dev, 0x00D0, &val);
3365 if (rc)
3366 return;
3367
3368 dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
3369
3370 rc = pci_read_config_byte(dev, 0x00D1, &val);
3371 if (rc)
3372 return;
3373
3374 dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
3375}
3376DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
3377DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
#define I915_DEIER_REG 0x4400c
/*
 * Some BIOSes hand off with the Intel GPU's display-engine interrupt
 * enable register (DEIER) non-zero, which can cause spurious interrupts.
 * Map BAR 0 and clear the register if anything is enabled.
 */
static void disable_igfx_irq(struct pci_dev *dev)
{
	void __iomem *regs = pci_iomap(dev, 0, 0);
	if (regs == NULL) {
		pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
		return;
	}

	/* Only warn/write when the BIOS actually left interrupts enabled. */
	if (readl(regs + I915_DEIER_REG) != 0) {
		pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");

		writel(0, regs + I915_DEIER_REG);
	}

	pci_iounmap(dev, regs);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3416
3417
3418
3419
3420
/*
 * Skip the default delay after a D3hot->D0 transition for devices that
 * don't need it, speeding up resume.
 */
static void quirk_remove_d3hot_delay(struct pci_dev *dev)
{
	dev->d3hot_delay = 0;
}
/* Intel graphics/SoC devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay);
/* Intel 8-series PCH devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay);
/* Intel 0x22xx-range devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay);
3451
3452
3453
3454
3455
3456
/*
 * Mark devices whose INTx masking (PCI 2.3 Command-register Interrupt
 * Disable bit) does not work, so the core never relies on it.
 */
static void quirk_broken_intx_masking(struct pci_dev *dev)
{
	dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004,
			quirk_broken_intx_masking);

/* Realtek 8169 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
			quirk_broken_intx_masking);

/* Intel Ethernet controller family (0x15xx / 0x37dx device IDs) */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
3497
/* Mellanox devices known to have broken INTx masking unconditionally. */
static u16 mellanox_broken_intx_devs[] = {
	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
};
3514
#define CONNECTX_4_CURR_MAX_MINOR 99
#define CONNECTX_4_INTX_SUPPORT_MINOR 14

/*
 * Decide whether a Mellanox device has broken INTx masking.  Devices in
 * mellanox_broken_intx_devs are always broken.  For ConnectX-4/4-LX the
 * answer depends on the firmware version, which is read from the device's
 * BAR 0 initialization segment (two big-endian 32-bit words at offset 0).
 */
static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
{
	__be32 __iomem *fw_ver;
	u16 fw_major;
	u16 fw_minor;
	u16 fw_subminor;
	u32 fw_maj_min;
	u32 fw_sub_min;
	int i;

	for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
		if (pdev->device == mellanox_broken_intx_devs[i]) {
			pdev->broken_intx_masking = 1;
			return;
		}
	}

	/* ConnectIB is known good; nothing to check. */
	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
		return;

	/* Only ConnectX-4 and ConnectX-4 LX need the firmware-version check. */
	if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
	    pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
		return;

	/* Enable memory access so we can read the FW version from BAR 0. */
	if (pci_enable_device_mem(pdev)) {
		pci_warn(pdev, "Can't enable device memory\n");
		return;
	}

	fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
	if (!fw_ver) {
		pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
		goto out;
	}

	/* major/minor in the first word, subminor in the second. */
	fw_maj_min = ioread32be(fw_ver);
	fw_sub_min = ioread32be(fw_ver + 1);
	fw_major = fw_maj_min & 0xffff;
	fw_minor = fw_maj_min >> 16;
	fw_subminor = fw_sub_min & 0xffff;
	if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
	    fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
		pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
			 fw_major, fw_minor, fw_subminor, pdev->device ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
		pdev->broken_intx_masking = 1;
	}

	iounmap(fw_ver);

out:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
			mellanox_check_broken_intx_masking);
3585
/* Flag a device so the core never uses secondary bus reset on it. */
static void quirk_no_bus_reset(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
}
3590
3591
3592
3593
3594
/*
 * Apply the no-bus-reset flag to NVIDIA devices whose ID falls in the
 * 0x2340-0x237f range (masking the low 6 bits of the device ID).
 */
static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
{
	if ((dev->device & 0xffc0) == 0x2340)
		quirk_no_bus_reset(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			 quirk_nvidia_no_bus_reset);

/* Atheros wireless parts that do not survive a bus reset. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
3632
/*
 * Mark a device as not resettable via a D3hot->D0 power-management
 * transition.  Root-bus devices are left alone.
 */
static void quirk_no_pm_reset(struct pci_dev *dev)
{
	/*
	 * Only flag non-root-bus devices; NOTE(review): the rationale for
	 * exempting root-bus devices is not visible here — confirm upstream.
	 */
	if (!pci_is_root_bus(dev->bus))
		dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
}

/* Applied to all AMD/ATI VGA-class display devices. */
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3653
3654
3655
3656
3657
3658
/*
 * Disable MSI on hotplug bridges of the listed Intel Thunderbolt
 * controllers.  Cactus Ridge 4C revision 2 and later are exempt.
 */
static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
{
	if (pdev->is_hotplug_bridge &&
	    (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
	     pdev->revision <= 1))
		pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
			quirk_thunderbolt_hotplug_msi);
3676
#ifdef CONFIG_ACPI
/*
 * On Apple machines, cut power to the Cactus Ridge Thunderbolt controller
 * before firmware-driven suspend by executing the SXIO/SXFP/SXLV ACPI
 * methods found under the controller's upstream bridge.  Only runs on
 * Apple hardware, only for upstream-port devices, and only when suspend
 * goes via firmware (where the BIOS expects the controller powered off).
 */
static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
	acpi_handle bridge, SXIO, SXFP, SXLV;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
		return;

	/*
	 * Skip when suspend does not go through firmware; the power-off
	 * handshake is only needed for firmware-mediated suspend.
	 */
	if (!pm_suspend_via_firmware())
		return;

	bridge = ACPI_HANDLE(&dev->dev);
	if (!bridge)
		return;

	/*
	 * All three control methods must exist beneath the bridge; bail out
	 * quietly if the firmware does not provide them.
	 */
	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
		return;
	pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");

	/*
	 * Power-off sequence; the order and the 300 ms settle delay are
	 * firmware-mandated — do not reorder.
	 */
	acpi_execute_simple_method(SXIO, NULL, 1);
	acpi_execute_simple_method(SXFP, NULL, 0);
	msleep(300);
	acpi_execute_simple_method(SXLV, NULL, 0);
	acpi_execute_simple_method(SXIO, NULL, 0);
	acpi_execute_simple_method(SXLV, NULL, 0);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_poweroff_thunderbolt);
#endif
3740
3741
3742
3743
3744
3745
/*
 * Device-specific reset for the Intel 82599 SFP virtual function: a plain
 * FLR suffices.  With @probe true, just report that reset is supported.
 * Always returns 0.
 */
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, bool probe)
{
	/*
	 * NOTE(review): FLR is issued unconditionally here (no capability
	 * re-check); presumably the VF is known to support FLR — confirm.
	 */
	if (!probe)
		pcie_flr(dev);
	return 0;
}
3760
/* MMIO register offsets within the IVB integrated-graphics BAR 0. */
#define SOUTH_CHICKEN2		0xc2004
#define PCH_PP_STATUS		0xc7200
#define PCH_PP_CONTROL		0xc7204
#define MSG_CTL			0x45010
#define NSDE_PWR_STATE		0xd0100
#define IGD_OPERATION_TIMEOUT	10000     /* set timeout 10 seconds */

/*
 * Device-specific reset for Ivy Bridge integrated graphics: force the
 * panel power sequencer off via MMIO and wait (up to 10 s) for the
 * power-status bits to clear.  Register writes below are a fixed hardware
 * sequence — do not reorder.
 */
static int reset_ivb_igd(struct pci_dev *dev, bool probe)
{
	void __iomem *mmio_base;
	unsigned long timeout;
	u32 val;

	if (probe)
		return 0;

	mmio_base = pci_iomap(dev, 0, 0);
	if (!mmio_base)
		return -ENOMEM;

	iowrite32(0x00000002, mmio_base + MSG_CTL);

	/*
	 * Force the panel power-down sequence, then clear the power-enable
	 * bit in PCH_PP_CONTROL.
	 */
	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);

	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
	iowrite32(val, mmio_base + PCH_PP_CONTROL);

	/* Poll PCH_PP_STATUS until the busy/status bits (0xb0000000) clear. */
	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
	do {
		val = ioread32(mmio_base + PCH_PP_STATUS);
		if ((val & 0xb0000000) == 0)
			goto reset_complete;
		msleep(10);
	} while (time_before(jiffies, timeout));
	pci_warn(dev, "timeout during reset\n");

reset_complete:
	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);

	pci_iounmap(dev, mmio_base);
	return 0;
}
3809
3810
/* Device-specific reset for Chelsio T4-based adapters. */
static int reset_chelsio_generic_dev(struct pci_dev *dev, bool probe)
{
	u16 old_command;
	u16 msix_flags;

	/*
	 * Only devices in the 0x4xxx device-ID range (T4 family) are
	 * handled; everything else is not supported by this method.
	 */
	if ((dev->device & 0xf000) != 0x4000)
		return -ENOTTY;

	/* T4 can always be FLR'd; probing requires no further checks. */
	if (probe)
		return 0;

	/*
	 * Bus mastering must be enabled for the FLR sequence; remember the
	 * old command word so it can be restored afterwards.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &old_command);
	pci_write_config_word(dev, PCI_COMMAND,
			      old_command | PCI_COMMAND_MASTER);

	/* Save state so it can be restored after the FLR. */
	pci_save_state(dev);

	/*
	 * If MSI-X is currently disabled, temporarily enable it with all
	 * vectors masked so the device cannot raise interrupts while it is
	 * being reset.  (The saved state restores the original MSI-X
	 * configuration afterwards.)
	 */
	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
				      msix_flags |
				      PCI_MSIX_FLAGS_ENABLE |
				      PCI_MSIX_FLAGS_MASKALL);

	pcie_flr(dev);

	/* Restore saved state and the original command word. */
	pci_restore_state(dev);
	pci_write_config_word(dev, PCI_COMMAND, old_command);
	return 0;
}
3871
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166

/*
 * Device-specific reset for NVMe controllers that need an orderly
 * controller disable before FLR: clear CC.EN, wait (bounded by CAP.TO)
 * for CSTS.RDY to deassert, then issue the FLR.
 */
static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
{
	void __iomem *bar;
	u16 cmd;
	u32 cfg;

	/* Must be an NVMe-class device with FLR support and a usable BAR 0. */
	if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
	    pcie_reset_flr(dev, PCI_RESET_PROBE) || !pci_resource_start(dev, 0))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Map just enough of BAR 0 to reach the CC register. */
	bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
	if (!bar)
		return -ENOTTY;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);

	cfg = readl(bar + NVME_REG_CC);

	/* Controller enabled: disable it cleanly before the FLR. */
	if (cfg & NVME_CC_ENABLE) {
		u32 cap = readl(bar + NVME_REG_CAP);
		unsigned long timeout;

		/*
		 * Clear any in-progress shutdown notification along with the
		 * enable bit.
		 */
		cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);

		writel(cfg, bar + NVME_REG_CC);

		/*
		 * Wait for CSTS.RDY to clear.  CAP.TO is in 500 ms units, so
		 * the timeout is (TO + 1) * 500 ms from now.
		 */
		timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

		for (;;) {
			u32 status = readl(bar + NVME_REG_CSTS);

			/* Ready-status cleared: controller is disabled. */
			if (!(status & NVME_CSTS_RDY))
				break;

			msleep(100);

			if (time_after(jiffies, timeout)) {
				pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
				break;
			}
		}
	}

	pci_iounmap(dev, bar);

	pcie_flr(dev);

	return 0;
}
3956
3957
3958
3959
3960
3961
3962
3963static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
3964{
3965 if (probe)
3966 return pcie_reset_flr(dev, PCI_RESET_PROBE);
3967
3968 pcie_reset_flr(dev, PCI_RESET_DO_RESET);
3969
3970 msleep(250);
3971
3972 return 0;
3973}
3974
#define PCI_DEVICE_ID_HINIC_VF      0x375E
#define HINIC_VF_FLR_TYPE           0x1000
#define HINIC_VF_FLR_CAP_BIT        (1UL << 30)
#define HINIC_VF_OP                 0xE80
#define HINIC_VF_FLR_PROC_BIT       (1UL << 18)
#define HINIC_OPERATION_TIMEOUT     15000	/* 15 seconds */

/*
 * Device-specific reset for Huawei HiNIC VFs: set the FLR-in-progress bit,
 * issue the FLR, then poll (up to 15 s) for the device/MPU to clear the
 * bit as acknowledgement.  Registers are big-endian.
 */
static int reset_hinic_vf_dev(struct pci_dev *pdev, bool probe)
{
	unsigned long timeout;
	void __iomem *bar;
	u32 val;

	if (probe)
		return 0;

	bar = pci_iomap(pdev, 0, 0);
	if (!bar)
		return -ENOTTY;

	/* Bail out if the device does not advertise the FLR-ack capability. */
	val = ioread32be(bar + HINIC_VF_FLR_TYPE);
	if (!(val & HINIC_VF_FLR_CAP_BIT)) {
		pci_iounmap(pdev, bar);
		return -ENOTTY;
	}

	/* Signal that an FLR is starting. */
	val = ioread32be(bar + HINIC_VF_OP);
	val = val | HINIC_VF_FLR_PROC_BIT;
	iowrite32be(val, bar + HINIC_VF_OP);

	pcie_flr(pdev);

	/*
	 * Config write issued while the device is recovering from FLR;
	 * the value written to the read-only vendor-ID register is
	 * irrelevant — the transaction itself matters.
	 */
	pci_write_config_word(pdev, PCI_VENDOR_ID, 0);

	/* Poll for the FLR-in-progress bit to be cleared by the device. */
	timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
	do {
		val = ioread32be(bar + HINIC_VF_OP);
		if (!(val & HINIC_VF_FLR_PROC_BIT))
			goto reset_complete;
		msleep(20);
	} while (time_before(jiffies, timeout));

	/* One final check after the deadline to avoid a spurious warning. */
	val = ioread32be(bar + HINIC_VF_OP);
	if (!(val & HINIC_VF_FLR_PROC_BIT))
		goto reset_complete;

	pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);

reset_complete:
	pci_iounmap(pdev, bar);

	return 0;
}
4037
/*
 * Table of device-specific reset methods, matched by vendor/device ID
 * (PCI_ANY_ID wildcards allowed).  Terminated by a zeroed entry.
 */
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
		 reset_intel_82599_sfp_virtfn },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
	{ PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
		reset_chelsio_generic_dev },
	{ PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
		reset_hinic_vf_dev },
	{ 0 }
};
4054
4055
4056
4057
4058
4059
4060int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
4061{
4062 const struct pci_dev_reset_methods *i;
4063
4064 for (i = pci_dev_reset_methods; i->reset; i++) {
4065 if ((i->vendor == dev->vendor ||
4066 i->vendor == (u16)PCI_ANY_ID) &&
4067 (i->device == dev->device ||
4068 i->device == (u16)PCI_ANY_ID))
4069 return i->reset(dev, probe);
4070 }
4071
4072 return -ENOTTY;
4073}
4074
/*
 * Some devices issue DMA with the requester ID of function 0 of their
 * slot; add that devfn as a DMA alias so IOMMU mappings cover it.
 */
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 0)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
}

/* Ricoh devices that DMA as function 0. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
4088
/*
 * Like quirk_dma_func0_alias, but for devices that DMA with the requester
 * ID of function 1 of their slot.
 */
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 1)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}

/* Marvell AHCI/SATA controllers that DMA as function 1. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(0x1c28, /* Lite-On */
			 0x0122, /* Plextor M6E (Marvell 88SS9183)*/
			 quirk_dma_func1_alias);
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
/*
 * Devices whose DMA alias is a fixed devfn (stored in driver_data) rather
 * than derived from the device's own devfn; matched by subsystem IDs.
 */
static const struct pci_device_id fixed_dma_alias_tbl[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bb),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bc),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ 0 }
};
4173
/* Add the fixed DMA alias from fixed_dma_alias_tbl if the device matches. */
static void quirk_fixed_dma_alias(struct pci_dev *dev)
{
	const struct pci_device_id *id;

	id = pci_match_id(fixed_dma_alias_tbl, dev);
	if (id)
		pci_add_dma_alias(dev, id->driver_data, 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
/*
 * Some PCIe-to-PCI bridges forward DMA using the bridge's own requester
 * ID instead of the proper PCI-bridge alias.  Flag conventional-PCI
 * bridges that sit directly below a non-PCI-bridge PCIe device so the
 * IOMMU layer uses the PCIe bridge's ID for translation.
 */
static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
{
	if (!pci_is_root_bus(pdev->bus) &&
	    pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
	    pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
		pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
}

/* ASMedia ASM1083/1085 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
			 quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
4212
4213
4214
4215
4216
4217
4218
/*
 * Intel MIC x200 NTB forwards DMA using several alternate requester IDs;
 * register each of them as an alias so IOMMU mappings cover all of them.
 */
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242static void quirk_pex_vca_alias(struct pci_dev *pdev)
4243{
4244 const unsigned int num_pci_slots = 0x20;
4245 unsigned int slot;
4246
4247 for (slot = 0; slot < num_pci_slots; slot++)
4248 pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
4249}
4250DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
4251DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
4252DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
4253DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
4254DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
4255DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
4256
4257
4258
4259
4260
4261
/*
 * Cavium ThunderX2-style root ports translate requester IDs; flag the
 * bridge so IOMMU grouping accounts for the translation at the root.
 */
static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
			 quirk_bridge_cavm_thrx2_pcie_root);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
			 quirk_bridge_cavm_thrx2_pcie_root);
4270
4271
4272
4273
4274
/*
 * Intersil/Techwell TW686x video-capture cards enumerate with class
 * NOT_DEFINED; override to multimedia-other (prog-if 0x01) so a driver
 * can bind.
 */
static void quirk_tw686x_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Use "video device" class */
	pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
	pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
4292
4293
4294
4295
4296
4297
/*
 * Some root ports forward Relaxed Ordering TLPs incorrectly; flag the
 * device so endpoints below it never set the RO attribute.
 */
static void quirk_relaxedordering_disable(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
	pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
}

/*
 * Intel Xeon root ports (0x6f01-0x6f0e and 0x2f01-0x2f0e) affected by a
 * Relaxed Ordering completion erratum.
 */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);

/* AMD root ports (0x1a00-0x1a02) with the same Relaxed Ordering issue. */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
/*
 * Clear the Relaxed Ordering and No Snoop enable bits in the Device
 * Control register of @pdev's root port, working around a completion
 * erratum triggered by these attributes.
 */
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
	struct pci_dev *root_port = pcie_find_root_port(pdev);

	if (!root_port) {
		pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
		return;
	}

	pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
		 dev_name(&pdev->dev));
	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_RELAX_EN |
					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
}
4420
4421
4422
4423
4424
/*
 * Chelsio devices with a device ID in the 0x54xx range are affected by a
 * completion-handling erratum; for those, disable Relaxed Ordering and
 * No Snoop on the upstream Root Port.  Other Chelsio device IDs are
 * matched by the fixup (vendor + PCI_ANY_ID) but left untouched.
 */
static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
{
	/* Only device IDs of the form 0x54xx need the workaround. */
	if ((pdev->device & 0xff00) == 0x5400)
		quirk_disable_root_port_attributes(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
			 quirk_chelsio_T5_disable_root_port_attributes);
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
4451{
4452 if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
4453 return 1;
4454 return 0;
4455}
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
/*
 * ACS quirk for AMD southbridge multifunction devices.  When the IOMMU
 * (detected via the ACPI IVRS table) is present, these devices behave as
 * if Request Redirect and Completion Redirect were enabled, so report
 * those controls as provided.  Without ACPI we cannot check, so the quirk
 * does not apply.
 */
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
#ifdef CONFIG_ACPI
	struct acpi_table_header *header = NULL;
	acpi_status status;

	/* Quirk only covers multifunction devices on the root bus. */
	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
		return -ENODEV;

	/* AMD IOMMU present?  Its ACPI table is "IVRS". */
	status = acpi_get_table("IVRS", 0, &header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_put_table(header);

	/* Filter out flags not applicable to multifunction devices. */
	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);

	return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
	return -ENODEV;
#endif
}
4507
4508static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
4509{
4510 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4511 return false;
4512
4513 switch (dev->device) {
4514
4515
4516
4517
4518 case 0xa000 ... 0xa7ff:
4519 case 0xaf84:
4520 case 0xb884:
4521 return true;
4522 default:
4523 return false;
4524 }
4525}
4526
/*
 * ACS quirk for the Cavium Root Ports matched above: they do not expose
 * an ACS capability but effectively provide source validation, request
 * redirect, completion redirect and upstream forwarding, so report those
 * controls as enabled.
 */
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (!pci_quirk_cavium_acs_match(dev))
		return -ENOTTY;

	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4543
/*
 * ACS quirk for X-Gene (and Ampere, per the table below) root ports:
 * treat SV/RR/CR/UF as implicitly provided even though no ACS capability
 * is exposed.
 */
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4554
4555
4556
4557
4558
4559
4560static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
4561{
4562 if (!pci_is_pcie(dev) ||
4563 ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
4564 (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
4565 return -ENOTTY;
4566
4567 switch (dev->device) {
4568 case 0x0710 ... 0x071e:
4569 case 0x0721:
4570 case 0x0723 ... 0x0732:
4571 return pci_acs_ctrl_enabled(acs_flags,
4572 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4573 }
4574
4575 return false;
4576}
4577
4578
4579
4580
4581
4582
4583
/*
 * Root Port device IDs of Intel PCH generations that lack an ACS
 * capability but can be made ACS-equivalent via the LPC/MPC register
 * programming in pci_quirk_enable_intel_pch_acs() below.  Grouped by
 * ID range (one chipset generation per group).
 */
static const u16 pci_quirk_intel_pch_acs_ids[] = {
	/* 0x3b4x group */
	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
	/* 0x1c1x group */
	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
	/* 0x1e1x group */
	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
	/* 0x8c1x group */
	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
	/* 0x9c1x group */
	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
	/* 0x9c9x group */
	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
	/* 0x1d1x group (even IDs only) */
	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
	/* 0x8d1x group */
	0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
	0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
	/* 0x8c9x group (even IDs only) */
	0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
4611
4612static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4613{
4614 int i;
4615
4616
4617 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4618 return false;
4619
4620 for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
4621 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4622 return true;
4623
4624 return false;
4625}
4626
/*
 * Report ACS controls for the Intel PCH Root Ports above.  If
 * pci_quirk_enable_intel_pch_acs() previously succeeded it set
 * PCI_DEV_FLAGS_ACS_ENABLED_QUIRK, in which case SV/RR/CR/UF are
 * considered provided; otherwise nothing is.
 */
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (!pci_quirk_intel_pch_acs_match(dev))
		return -ENOTTY;

	if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
		return pci_acs_ctrl_enabled(acs_flags,
			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);

	return pci_acs_ctrl_enabled(acs_flags, 0);
}
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
/*
 * ACS quirk for the QCOM/HXT root ports listed in the table below:
 * SV/RR/CR/UF are treated as implicitly provided by the hardware design.
 */
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4654
4655
4656
4657
4658
4659
4660
/*
 * ACS quirk for the NXP root ports listed in the table below:
 * SV/RR/CR/UF are treated as implicitly provided by the hardware design.
 */
static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4666
4667static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4668{
4669 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4670 return -ENOTTY;
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4681
4682 return acs_flags ? 0 : 1;
4683}
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
/*
 * Return true when @dev is a Root Port of an Intel SPT-style PCH (device
 * IDs below) whose ACS capability exists but whose ACS control register
 * lives at a non-standard offset (see INTEL_SPT_ACS_CTRL).
 */
static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
{
	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return false;

	switch (dev->device) {
	case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a:
	case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee:
	case 0x9d10 ... 0x9d1b:
		return true;
	}

	return false;
}
4744
/* SPT PCH: ACS control register sits one dword past its standard offset. */
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)

/*
 * Report ACS controls for SPT-style PCH Root Ports by reading the control
 * register at its non-standard offset.
 */
static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = dev->acs_cap;
	if (!pos)
		return -ENOTTY;

	/* Only consider flags the capability advertises (EC always kept). */
	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
4767
/*
 * ACS quirk for multifunction endpoints that don't expose ACS but whose
 * functions are isolated from each other by design: report every ACS
 * control relevant to endpoints (SV/TB/RR/CR/UF/DT) as provided.
 */
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
		PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
4783
/*
 * ACS quirk for Intel Root Complex Integrated Endpoints (RCiEPs): they
 * have no way to route peer-to-peer, so treat SV/RR/CR/UF as provided.
 * Applies only to devices of type PCI_EXP_TYPE_RC_END.
 */
static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
		return -ENOTTY;

	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4797
/*
 * ACS quirk for the Broadcom device matched in the table below:
 * SV/RR/CR/UF are treated as implicitly provided by the hardware design.
 */
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4809
/*
 * Table mapping (vendor, device) to a device-specific ACS-equivalence
 * test.  Scanned in order by pci_dev_specific_acs_enabled(); the first
 * matching entry whose handler returns >= 0 wins, so specific device IDs
 * must precede PCI_ANY_ID entries for the same vendor.
 */
static const struct pci_dev_acs_enabled {
	u16 vendor;
	u16 device;
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NIC group */
	{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NIC group */
	{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NIC group */
	{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NIC group */
	{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NIC group */
	{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NIC group */
	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
	/* QCOM root ports */
	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
	/* HXT root ports (same ACS design as QCOM) */
	{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
	/* Intel PCH root ports (handlers do their own device-ID matching) */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs },
	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs },
	/* Cavium root ports (handler does its own device-ID matching) */
	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
	/* Cavium multifunction devices */
	{ PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
	/* APM X-Gene */
	{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
	/* Ampere */
	{ PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
	/* Broadcom */
	{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
	/* Amazon Annapurna Labs */
	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
	/* Zhaoxin multifunction devices */
	{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
	/* NXP root ports, xx=16, 12, and 08 cores */
	{ PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },

	{ PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },

	{ PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },

	{ PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },

	{ PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },

	{ PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },

	{ PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },

	{ PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
	{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
	/* Zhaoxin root/downstream ports (handler does its own matching) */
	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
	{ 0 }
};
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
/*
 * pci_dev_specific_acs_enabled - check whether device provides ACS controls
 * @dev: device to test
 * @acs_flags: bitmask of required ACS controls
 *
 * Walk the quirk table in order; the first (vendor, device) match whose
 * handler returns >= 0 decides: 1 = all requested controls provided,
 * 0 = not provided.  Handlers return a negative errno to say "quirk does
 * not apply, keep searching".  Returns -ENOTTY when no quirk applies.
 */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
	const struct pci_dev_acs_enabled *i;
	int ret;

	for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
		if ((i->vendor == dev->vendor ||
		     i->vendor == (u16)PCI_ANY_ID) &&
		    (i->device == dev->device ||
		     i->device == (u16)PCI_ANY_ID)) {
			ret = i->acs_enabled(dev, acs_flags);
			if (ret >= 0)
				return ret;
		}
	}

	return -ENOTTY;
}
4990
4991
/* LPC bridge config register holding the Root Complex Base Address. */
#define INTEL_LPC_RCBA_REG 0xf0
/* Upper bits of RCBA_REG are the RCBA base address. */
#define INTEL_LPC_RCBA_MASK 0xffffc000
/* RCBA enable bit. */
#define INTEL_LPC_RCBA_ENABLE (1 << 0)

/* Backbone Scratch Pad register, relative to RCBA. */
#define INTEL_BSPR_REG 0x1104
/* Backbone Peer Non-Posted Disable. */
#define INTEL_BSPR_REG_BPNPD (1 << 8)
/* Backbone Peer Posted Disable. */
#define INTEL_BSPR_REG_BPPD (1 << 9)

/* Upstream Peer Decode Configuration register, relative to RCBA. */
#define INTEL_UPDCR_REG 0x1014
/* Low six bits enable peer decodes. */
#define INTEL_UPDCR_REG_MASK 0x3f

/*
 * Read the RCBA from the LPC bridge at 1f.0, map the chipset register
 * space it points to, and - unless BSPR already disables both posted and
 * non-posted peer traffic - clear the UPDCR peer-decode enable bits so
 * upstream requests cannot be redirected peer-to-peer.  Part of making
 * the PCH Root Ports ACS-equivalent.
 */
static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
{
	u32 rcba, bspr, updcr;
	void __iomem *rcba_mem;

	pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
				  INTEL_LPC_RCBA_REG, &rcba);
	if (!(rcba & INTEL_LPC_RCBA_ENABLE))
		return -EINVAL;

	rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
			   PAGE_ALIGN(INTEL_UPDCR_REG));
	if (!rcba_mem)
		return -ENOMEM;

	/*
	 * If peer decodes are already fully disabled via BSPR there is
	 * nothing to do; otherwise clear any enabled UPDCR peer decodes.
	 */
	bspr = readl(rcba_mem + INTEL_BSPR_REG);
	bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
	if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
		updcr = readl(rcba_mem + INTEL_UPDCR_REG);
		if (updcr & INTEL_UPDCR_REG_MASK) {
			pci_info(dev, "Disabling UPDCR peer decodes\n");
			updcr &= ~INTEL_UPDCR_REG_MASK;
			writel(updcr, rcba_mem + INTEL_UPDCR_REG);
		}
	}

	iounmap(rcba_mem);
	return 0;
}
5051
5052
/* Miscellaneous Port Configuration register in Root Port config space. */
#define INTEL_MPC_REG 0xd8
/* Invalid Receive Bus Number Check Enable. */
#define INTEL_MPC_REG_IRBNCE (1 << 26)

/*
 * Enable IRBNCE on a PCH Root Port so requests with a bus number the port
 * does not decode are not forwarded peer-to-peer.  Part of making the
 * PCH Root Ports ACS-equivalent.
 */
static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
{
	u32 mpc;

	pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
	if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
		pci_info(dev, "Enabling MPC IRBNCE\n");
		mpc |= INTEL_MPC_REG_IRBNCE;
		/*
		 * NOTE(review): a word write of a 32-bit value whose only
		 * modified bit is bit 26 looks suspicious - a 16-bit write
		 * cannot reach bit 26.  Left as-is pending confirmation
		 * against chipset documentation / hardware behavior.
		 */
		pci_write_config_word(dev, INTEL_MPC_REG, mpc);
	}
}
5074
5075
5076
5077
5078
5079
5080
5081
/*
 * Make a matched Intel PCH Root Port ACS-equivalent: disable LPC peer
 * decodes, enable MPC IRBNCE, and mark the device so
 * pci_quirk_intel_pch_acs() reports SV/RR/CR/UF as provided.  Returns 0
 * even when the LPC step fails (the failure is only logged), since
 * the quirk matched this device.
 */
static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
{
	if (!pci_quirk_intel_pch_acs_match(dev))
		return -ENOTTY;

	if (pci_quirk_enable_intel_lpc_acs(dev)) {
		pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
		return 0;
	}

	pci_quirk_enable_intel_rp_mpc_acs(dev);

	dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;

	pci_info(dev, "Intel PCH root port ACS workaround enabled\n");

	return 0;
}
5100
/*
 * Enable ACS on an SPT-style PCH Root Port by writing the control
 * register at its non-standard offset: turn on every SV/RR/CR/UF control
 * the capability advertises, plus Translation Blocking when ATS is
 * globally disabled or the device is external-facing/untrusted.
 */
static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = dev->acs_cap;
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	ctrl |= (cap & PCI_ACS_SV);
	ctrl |= (cap & PCI_ACS_RR);
	ctrl |= (cap & PCI_ACS_CR);
	ctrl |= (cap & PCI_ACS_UF);

	/* Block translated requests when ATS must not be trusted. */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);

	pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");

	return 0;
}
5130
/*
 * Disable ACS redirect (RR/CR/EC) on an SPT-style PCH Root Port, again
 * using the non-standard control-register offset.  Counterpart of
 * pci_quirk_enable_intel_spt_pch_acs() for the "disable_acs_redir"
 * kernel parameter path.
 */
static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = dev->acs_cap;
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);

	pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");

	return 0;
}
5154
/*
 * Device-specific ACS enable/disable handlers, scanned in order by the
 * two walkers below.  Handlers return -ENOTTY when they do not apply to
 * the device, so the walk continues to the next entry.
 */
static const struct pci_dev_acs_ops {
	u16 vendor;
	u16 device;
	int (*enable_acs)(struct pci_dev *dev);
	int (*disable_acs_redir)(struct pci_dev *dev);
} pci_dev_acs_ops[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_pch_acs,
	},
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
	    .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
	},
};
5169
/*
 * pci_dev_specific_enable_acs - enable ACS via a device-specific quirk
 * @dev: device to enable ACS on
 *
 * Run the first matching ->enable_acs handler that returns >= 0.
 * Returns its result, or -ENOTTY when no quirk applies.
 */
int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
	const struct pci_dev_acs_ops *p;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
		p = &pci_dev_acs_ops[i];
		if ((p->vendor == dev->vendor ||
		     p->vendor == (u16)PCI_ANY_ID) &&
		    (p->device == dev->device ||
		     p->device == (u16)PCI_ANY_ID) &&
		    p->enable_acs) {
			ret = p->enable_acs(dev);
			if (ret >= 0)
				return ret;
		}
	}

	return -ENOTTY;
}
5190
/*
 * pci_dev_specific_disable_acs_redir - disable ACS redirect via a quirk
 * @dev: device to disable ACS redirect on
 *
 * Same walk as pci_dev_specific_enable_acs(), but for the
 * ->disable_acs_redir handlers.  Returns -ENOTTY when no quirk applies.
 */
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
	const struct pci_dev_acs_ops *p;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
		p = &pci_dev_acs_ops[i];
		if ((p->vendor == dev->vendor ||
		     p->vendor == (u16)PCI_ANY_ID) &&
		    (p->device == dev->device ||
		     p->device == (u16)PCI_ANY_ID) &&
		    p->disable_acs_redir) {
			ret = p->disable_acs_redir(dev);
			if (ret >= 0)
				return ret;
		}
	}

	return -ENOTTY;
}
5211
5212
5213
5214
5215
5216
5217
5218
5219static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
5220{
5221 int pos, i = 0;
5222 u8 next_cap;
5223 u16 reg16, *cap;
5224 struct pci_cap_saved_state *state;
5225
5226
5227 if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
5228 return;
5229
5230
5231 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
5232 if (!pos)
5233 return;
5234
5235
5236
5237
5238
5239 pci_read_config_byte(pdev, pos + 1, &next_cap);
5240 if (next_cap)
5241 return;
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251 pos = 0x50;
5252 pci_read_config_word(pdev, pos, ®16);
5253 if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
5254 u32 status;
5255#ifndef PCI_EXP_SAVE_REGS
5256#define PCI_EXP_SAVE_REGS 7
5257#endif
5258 int size = PCI_EXP_SAVE_REGS * sizeof(u16);
5259
5260 pdev->pcie_cap = pos;
5261 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16);
5262 pdev->pcie_flags_reg = reg16;
5263 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16);
5264 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
5265
5266 pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
5267 if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
5268 PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
5269 pdev->cfg_size = PCI_CFG_SPACE_SIZE;
5270
5271 if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
5272 return;
5273
5274
5275 state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
5276 if (!state)
5277 return;
5278
5279 state->cap.cap_nr = PCI_CAP_ID_EXP;
5280 state->cap.cap_extended = 0;
5281 state->cap.size = size;
5282 cap = (u16 *)&state->cap.data[0];
5283 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
5284 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
5285 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
5286 pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
5287 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
5288 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
5289 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
5290 hlist_add_head(&state->next, &pdev->saved_cap_space);
5291 }
5292}
5293DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
5294
5295
5296
5297
5298
5299
5300
5301
5302
5303
5304
/*
 * Mark devices whose Function Level Reset advertisement cannot be
 * trusted, so the PCI core never attempts FLR on them.
 */
static void quirk_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
5314
/*
 * Some bridges cannot handle PCIe Extended Tags.  Disable them for the
 * whole hierarchy: flag the host bridge and re-run tag configuration on
 * every device below it.
 */
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);

	if (!bridge)
		return;

	bridge->no_ext_tags = 1;
	pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");

	/* Re-apply tag settings hierarchy-wide now that the flag is set. */
	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
5334
5335#ifdef CONFIG_PCI_ATS
5336
5337
5338
5339
5340
/*
 * Disable ATS on specific AMD/ATI GPU device/revision combinations where
 * it is known not to work.  Devices 0x7312/0x7340/0x7341 are only
 * affected at particular revisions; 0x15d8 only at revision 0xcf with
 * specific subsystem IDs.  All other matched devices fall through to the
 * disable.
 */
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
	/* Revisions of these device IDs other than the listed ones are OK. */
	if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
	    (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
	    (pdev->device == 0x7341 && pdev->revision != 0x00))
		return;

	if (pdev->device == 0x15d8) {
		/* Only this revision + subsystem-ID combination is broken. */
		if (pdev->revision == 0xcf &&
		    pdev->subsystem_vendor == 0xea50 &&
		    (pdev->subsystem_device == 0xce19 ||
		     pdev->subsystem_device == 0xcc10 ||
		     pdev->subsystem_device == 0xcc08))
			goto no_ats;
		else
			return;
	}

no_ats:
	pci_info(pdev, "disabling ATS\n");
	pdev->ats_cap = 0;
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
5375#endif
5376
5377
/* Freescale PCIe root ports cannot generate MSIs; force INTx. */
static void quirk_fsl_no_msi(struct pci_dev *pdev)
{
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
		pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
5384
5385
5386
5387
5388
5389
5390
5391
5392
/*
 * pci_create_device_link - make @pdev's power state depend on a sibling
 * @pdev: consumer device (must be function @consumer of its slot)
 * @consumer: function number @pdev is expected to be
 * @supplier: function number of the supplier in the same slot
 * @class: expected class of the supplier after shifting
 * @class_shift: bits to shift the supplier's class code before comparing
 *
 * Creates a stateless, runtime-PM device link from @pdev to the supplier
 * function, then allows runtime PM on @pdev.  Used for GPU subfunctions
 * (HDA/USB/UCSI) that must not power up while the GPU is off.
 */
static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
				   unsigned int supplier, unsigned int class,
				   unsigned int class_shift)
{
	struct pci_dev *supplier_pdev;

	if (PCI_FUNC(pdev->devfn) != consumer)
		return;

	supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
	if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
		/* pci_dev_put(NULL) is a no-op, so this is safe either way. */
		pci_dev_put(supplier_pdev);
		return;
	}

	if (device_link_add(&pdev->dev, &supplier_pdev->dev,
			    DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
		pci_info(pdev, "D0 power state depends on %s\n",
			 pci_name(supplier_pdev));
	else
		pci_err(pdev, "Cannot enforce power dependency on %s\n",
			pci_name(supplier_pdev));

	pm_runtime_allow(&pdev->dev);
	pci_dev_put(supplier_pdev);
}
5421
5422
5423
5424
5425
/*
 * GPU HD-audio subfunctions (function 1) must not be in D0 while the GPU
 * (function 0, display class) is powered down.
 */
static void quirk_gpu_hda(struct pci_dev *hda)
{
	pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
5436
5437
5438
5439
5440
/*
 * GPU USB subfunctions (function 2) must not be in D0 while the GPU
 * (function 0, display class) is powered down.
 */
static void quirk_gpu_usb(struct pci_dev *usb)
{
	pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
5449
5450
5451
5452
5453
5454
5455
/*
 * GPU USB Type-C UCSI subfunctions (function 3, which report the
 * "unknown serial bus" class code) must not be in D0 while the GPU
 * (function 0, display class) is powered down.
 */
#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
{
	pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_UNKNOWN, 8,
			      quirk_gpu_usb_typec_ucsi);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_UNKNOWN, 8,
			      quirk_gpu_usb_typec_ucsi);
5467
5468
5469
5470
5471
/*
 * Some NVIDIA GPUs power down/hide their HDA function.  Re-enable it via
 * bit 25 of config register 0x488 and refresh the multifunction flag so
 * the HDA function gets enumerated.  Runs at header fixup and again at
 * resume (the bit may be lost across suspend).
 */
static void quirk_nvidia_hda(struct pci_dev *gpu)
{
	u8 hdr_type;
	u32 val;

	/* Only device IDs from GEFORCE_320M onward are handled. */
	if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
		return;

	/* Bit 25 at config offset 0x488 gates the HDA function. */
	pci_read_config_dword(gpu, 0x488, &val);
	if (val & BIT(25))
		return;

	pci_info(gpu, "Enabling HDA controller\n");
	pci_write_config_dword(gpu, 0x488, val | BIT(25));

	/* The GPU becomes multifunction once the HDA function appears. */
	pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
	gpu->multifunction = !!(hdr_type & 0x80);
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			       PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
				     PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
/*
 * pci_idt_bus_quirk - read a device's vendor ID behind an IDT bridge
 * @bus: bus whose bridge needs the workaround
 * @devfn: device/function to probe
 * @l: output for the vendor/device ID dword
 * @timeout: CRS timeout passed through to the generic read
 *
 * Temporarily clears ACS Source Validation on the bridge (which would
 * otherwise block the probe), performs the generic vendor-ID read, pokes
 * a vendor-ID write to the found device, then restores the bridge's ACS
 * control.  Returns the generic read's "found" result.
 */
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
{
	int pos;
	u16 ctrl = 0;
	bool found;
	struct pci_dev *bridge = bus->self;

	pos = bridge->acs_cap;

	/* Disable ACS SV before the initial config-space scan. */
	if (pos) {
		pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
		if (ctrl & PCI_ACS_SV)
			pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
					      ctrl & ~PCI_ACS_SV);
	}

	found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);

	/* Write to vendor ID (read-only, so the value is irrelevant). */
	if (found)
		pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);

	/* Re-enable ACS SV if it was on before. */
	if (ctrl & PCI_ACS_SV)
		pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);

	return found;
}
5552
5553
5554
5555
5556
5557
5558
5559
/*
 * Microsemi Switchtec NTB devices forward DMA from peer partitions using
 * proxy requester IDs.  Read the per-partition requester-ID tables from
 * the device's management BAR and register each proxy ID as a DMA alias
 * of @pdev so the IOMMU maps them correctly.
 */
static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
{
	void __iomem *mmio;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	u64 partition_map;
	u8 partition;
	int pp;

	if (pci_enable_device(pdev)) {
		pci_err(pdev, "Cannot enable Switchtec device\n");
		return;
	}

	mmio = pci_iomap(pdev, 0, 0);
	if (mmio == NULL) {
		pci_disable_device(pdev);
		pci_err(pdev, "Cannot iomap Switchtec device\n");
		return;
	}

	pci_info(pdev, "Setting Switchtec proxy ID aliases\n");

	mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
	mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;

	partition = ioread8(&mmio_ntb->partition_id);

	/* 64-bit endpoint map, read as two 32-bit halves; skip our own. */
	partition_map = ioread32(&mmio_ntb->ep_map);
	partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
	partition_map &= ~(1ULL << partition);

	for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
		struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
		u32 table_sz = 0;
		int te;

		if (!(partition_map & (1ULL << pp)))
			continue;

		pci_dbg(pdev, "Processing partition %d\n", pp);

		mmio_peer_ctrl = &mmio_ctrl[pp];

		table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
		if (!table_sz) {
			pci_warn(pdev, "Partition %d table_sz 0\n", pp);
			continue;
		}

		/* Sanity-bound the hardware-reported table size. */
		if (table_sz > 512) {
			pci_warn(pdev,
				 "Invalid Switchtec partition %d table_sz %d\n",
				 pp, table_sz);
			continue;
		}

		for (te = 0; te < table_sz; te++) {
			u32 rid_entry;
			u8 devfn;

			rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
			devfn = (rid_entry >> 1) & 0xFF;
			pci_dbg(pdev,
				"Aliasing Partition %d Proxy ID %02x.%d\n",
				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
			pci_add_dma_alias(pdev, devfn, 1);
		}
	}

	pci_iounmap(pdev, mmio);
	pci_disable_device(pdev);
}
/*
 * Register the DMA-alias quirk for every known Switchtec NTB device ID
 * (class PCI_CLASS_BRIDGE_OTHER).
 */
#define SWITCHTEC_QUIRK(vid) \
	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)

/* 0x85xx family */
SWITCHTEC_QUIRK(0x8531);
SWITCHTEC_QUIRK(0x8532);
SWITCHTEC_QUIRK(0x8533);
SWITCHTEC_QUIRK(0x8534);
SWITCHTEC_QUIRK(0x8535);
SWITCHTEC_QUIRK(0x8536);
SWITCHTEC_QUIRK(0x8541);
SWITCHTEC_QUIRK(0x8542);
SWITCHTEC_QUIRK(0x8543);
SWITCHTEC_QUIRK(0x8544);
SWITCHTEC_QUIRK(0x8545);
SWITCHTEC_QUIRK(0x8546);
SWITCHTEC_QUIRK(0x8551);
SWITCHTEC_QUIRK(0x8552);
SWITCHTEC_QUIRK(0x8553);
SWITCHTEC_QUIRK(0x8554);
SWITCHTEC_QUIRK(0x8555);
SWITCHTEC_QUIRK(0x8556);
SWITCHTEC_QUIRK(0x8561);
SWITCHTEC_QUIRK(0x8562);
SWITCHTEC_QUIRK(0x8563);
SWITCHTEC_QUIRK(0x8564);
SWITCHTEC_QUIRK(0x8565);
SWITCHTEC_QUIRK(0x8566);
SWITCHTEC_QUIRK(0x8571);
SWITCHTEC_QUIRK(0x8572);
SWITCHTEC_QUIRK(0x8573);
SWITCHTEC_QUIRK(0x8574);
SWITCHTEC_QUIRK(0x8575);
SWITCHTEC_QUIRK(0x8576);
/* 0x4xxx family */
SWITCHTEC_QUIRK(0x4000);
SWITCHTEC_QUIRK(0x4084);
SWITCHTEC_QUIRK(0x4068);
SWITCHTEC_QUIRK(0x4052);
SWITCHTEC_QUIRK(0x4036);
SWITCHTEC_QUIRK(0x4028);
SWITCHTEC_QUIRK(0x4100);
SWITCHTEC_QUIRK(0x4184);
SWITCHTEC_QUIRK(0x4168);
SWITCHTEC_QUIRK(0x4152);
SWITCHTEC_QUIRK(0x4136);
SWITCHTEC_QUIRK(0x4128);
SWITCHTEC_QUIRK(0x4200);
SWITCHTEC_QUIRK(0x4284);
SWITCHTEC_QUIRK(0x4268);
SWITCHTEC_QUIRK(0x4252);
SWITCHTEC_QUIRK(0x4236);
SWITCHTEC_QUIRK(0x4228);
5685
5686
5687
5688
5689
5690
5691
/*
 * A PLX NTB (non-transparent bridge) forwards peer traffic under proxy
 * requester IDs on this side of the bridge.  Since any of the 256
 * devfns under this device's bus may appear as a requester, alias them
 * all so the IOMMU accepts DMA from every possible proxy ID.
 */
static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
{
	pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");

	/* Alias every devfn 0x00-0xFF (256 consecutive aliases from 0) */
	pci_add_dma_alias(pdev, 0, 256);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
5700
5701
5702
5703
5704
5705
5706
5707
5708
5709
5710
5711
5712
5713
5714
5715
5716
5717static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5718{
5719 void __iomem *map;
5720 int ret;
5721
5722 if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
5723 pdev->subsystem_device != 0x222e ||
5724 !pci_reset_supported(pdev))
5725 return;
5726
5727 if (pci_enable_device_mem(pdev))
5728 return;
5729
5730
5731
5732
5733
5734 map = pci_iomap(pdev, 0, 0x23000);
5735 if (!map) {
5736 pci_err(pdev, "Can't map MMIO space\n");
5737 goto out_disable;
5738 }
5739
5740
5741
5742
5743
5744 if (ioread32(map + 0x2240c) & 0x2) {
5745 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5746 ret = pci_reset_bus(pdev);
5747 if (ret < 0)
5748 pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5749 }
5750
5751 iounmap(map);
5752out_disable:
5753 pci_disable_device(pdev);
5754}
5755DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
5756 PCI_CLASS_DISPLAY_VGA, 8,
5757 quirk_reset_lenovo_thinkpad_p50_nvgpu);
5758
5759
5760
5761
5762
/*
 * ASMedia [1b21:2142]: PME# does not work while the device is in D0
 * (per the message below), so drop the D0 bit from the device's
 * reported PME support mask; PME from other power states is untouched.
 */
static void pci_fixup_no_d0_pme(struct pci_dev *dev)
{
	pci_info(dev, "PME# does not work under D0, disabling it\n");
	/* Clear only the D0 capability bit of pme_support */
	dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
5769
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
/*
 * Pericom [12d8:400e] and [12d8:400f]: these devices advertise MSI and
 * PME# capabilities that do not work in practice (per the messages
 * below), so disable both.  MSI is only disabled when the kernel has
 * MSI support compiled in; PME support is cleared unconditionally.
 */
static void pci_fixup_no_msi_no_pme(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_MSI
	pci_info(dev, "MSI is not implemented on this device, disabling it\n");
	dev->no_msi = 1;
#endif
	pci_info(dev, "PME# is unreliable, disabling it\n");
	dev->pme_support = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_msi_no_pme);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_msi_no_pme);
5791
5792static void apex_pci_fixup_class(struct pci_dev *pdev)
5793{
5794 pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
5795}
5796DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
5797 PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
5798