1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/export.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/acpi.h>
22#include <linux/dmi.h>
23#include <linux/ioport.h>
24#include <linux/sched.h>
25#include <linux/ktime.h>
26#include <linux/mm.h>
27#include <linux/nvme.h>
28#include <linux/platform_data/x86/apple.h>
29#include <linux/pm_runtime.h>
30#include <linux/switchtec.h>
31#include <asm/dma.h>
32#include "pci.h"
33
/*
 * Log the quirk about to run (when booted with initcall_debug) and
 * return a start timestamp for fixup_debug_report() to measure against.
 */
static ktime_t fixup_debug_start(struct pci_dev *dev,
				 void (*fn)(struct pci_dev *dev))
{
	if (initcall_debug)
		pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));

	return ktime_get();
}
42
/*
 * Report how long a quirk hook took.  Duration is ns >> 10, i.e. an
 * approximation of microseconds; logged when initcall_debug is set or
 * the hook took longer than ~10 ms.
 */
static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
			       void (*fn)(struct pci_dev *dev))
{
	ktime_t delta, rettime;
	unsigned long long duration;

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	if (initcall_debug || duration > 10000)
		pci_info(dev, "%pS took %lld usecs\n", fn, duration);
}
55
/*
 * Walk the fixup table [f, end) and call every hook whose class, vendor
 * and device match @dev.  PCI_ANY_ID acts as a wildcard for each field;
 * the class is compared after shifting dev->class by f->class_shift.
 * Each invocation is timed via fixup_debug_start()/fixup_debug_report().
 */
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
			  struct pci_fixup *end)
{
	ktime_t calltime;

	for (; f < end; f++)
		if ((f->class == (u32) (dev->class >> f->class_shift) ||
		     f->class == (u32) PCI_ANY_ID) &&
		    (f->vendor == dev->vendor ||
		     f->vendor == (u16) PCI_ANY_ID) &&
		    (f->device == dev->device ||
		     f->device == (u16) PCI_ANY_ID)) {
			void (*hook)(struct pci_dev *dev);
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
			/* Hook is stored as a 32-bit place-relative offset */
			hook = offset_to_ptr(&f->hook_offset);
#else
			hook = f->hook;
#endif
			calltime = fixup_debug_start(dev, hook);
			hook(dev);
			fixup_debug_report(dev, calltime, hook);
		}
}
79
80extern struct pci_fixup __start_pci_fixups_early[];
81extern struct pci_fixup __end_pci_fixups_early[];
82extern struct pci_fixup __start_pci_fixups_header[];
83extern struct pci_fixup __end_pci_fixups_header[];
84extern struct pci_fixup __start_pci_fixups_final[];
85extern struct pci_fixup __end_pci_fixups_final[];
86extern struct pci_fixup __start_pci_fixups_enable[];
87extern struct pci_fixup __end_pci_fixups_enable[];
88extern struct pci_fixup __start_pci_fixups_resume[];
89extern struct pci_fixup __end_pci_fixups_resume[];
90extern struct pci_fixup __start_pci_fixups_resume_early[];
91extern struct pci_fixup __end_pci_fixups_resume_early[];
92extern struct pci_fixup __start_pci_fixups_suspend[];
93extern struct pci_fixup __end_pci_fixups_suspend[];
94extern struct pci_fixup __start_pci_fixups_suspend_late[];
95extern struct pci_fixup __end_pci_fixups_suspend_late[];
96
97static bool pci_apply_fixup_final_quirks;
98
/*
 * pci_fixup_device - run all quirks registered for @pass against @dev.
 *
 * Each pass maps to a linker section delimited by the
 * __start_pci_fixups_* / __end_pci_fixups_* symbols declared above.
 * The "final" pass is a no-op until pci_apply_final_quirks() sets
 * pci_apply_fixup_final_quirks, so final quirks run only once device
 * enumeration has completed.
 */
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
	struct pci_fixup *start, *end;

	switch (pass) {
	case pci_fixup_early:
		start = __start_pci_fixups_early;
		end = __end_pci_fixups_early;
		break;

	case pci_fixup_header:
		start = __start_pci_fixups_header;
		end = __end_pci_fixups_header;
		break;

	case pci_fixup_final:
		if (!pci_apply_fixup_final_quirks)
			return;
		start = __start_pci_fixups_final;
		end = __end_pci_fixups_final;
		break;

	case pci_fixup_enable:
		start = __start_pci_fixups_enable;
		end = __end_pci_fixups_enable;
		break;

	case pci_fixup_resume:
		start = __start_pci_fixups_resume;
		end = __end_pci_fixups_resume;
		break;

	case pci_fixup_resume_early:
		start = __start_pci_fixups_resume_early;
		end = __end_pci_fixups_resume_early;
		break;

	case pci_fixup_suspend:
		start = __start_pci_fixups_suspend;
		end = __end_pci_fixups_suspend;
		break;

	case pci_fixup_suspend_late:
		start = __start_pci_fixups_suspend_late;
		end = __end_pci_fixups_suspend_late;
		break;

	default:
		/* stupid compiler warning, you would think with an enum... */
		return;
	}
	pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
153
/*
 * Run the "final" quirk pass on every enumerated device, then settle
 * the global cache line size: keep a pre-set pci_cache_line_size, or
 * adopt the value common to all devices; on mismatch (or when no
 * device provided one) fall back to pci_dfl_cache_line_size.
 * Values are in units of 4 bytes, hence the "<< 2" when printing.
 */
static int __init pci_apply_final_quirks(void)
{
	struct pci_dev *dev = NULL;
	u8 cls = 0;
	u8 tmp;

	if (pci_cache_line_size)
		pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);

	pci_apply_fixup_final_quirks = true;
	for_each_pci_dev(dev) {
		pci_fixup_device(pci_fixup_final, dev);
		/*
		 * If arch hasn't set it explicitly yet, use the CLS
		 * value shared by all devices, if any.
		 */
		if (!pci_cache_line_size) {
			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
			if (!cls)
				cls = tmp;
			if (!tmp || cls == tmp)
				continue;

			pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
			         cls << 2, tmp << 2,
			         pci_dfl_cache_line_size << 2);
			pci_cache_line_size = pci_dfl_cache_line_size;
		}
	}

	if (!pci_cache_line_size) {
		pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
			pci_dfl_cache_line_size << 2);
		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
	}

	return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
194
195
196
197
198
199
200
/*
 * Host bridges: mark MMIO as always enabled so the core never clears
 * PCI_COMMAND decode bits for them (dev->mmio_always_on flag).
 */
static void quirk_mmio_always_on(struct pci_dev *dev)
{
	dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
207
208
209
210
211
212
/*
 * Mellanox Tavor devices report false positive parity errors; flag the
 * parity status as broken so the core ignores those reports.
 */
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
	dev->broken_parity_status = 1;	/* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
219
220
221
222
223
/*
 * 82441 (Natoma) host bridge: find every PIIX3 (82371SB function 0) in
 * the system and set bit 1 of its config register 0x82 to enable
 * Passive Release, avoiding deadlock/data-corruption issues described
 * in Intel's errata — NOTE(review): register 0x82 semantics taken from
 * the write pattern here; confirm against the PIIX3 datasheet.
 */
static void quirk_passive_release(struct pci_dev *dev)
{
	struct pci_dev *d = NULL;
	unsigned char dlc;

	/*
	 * We have to make sure a particular bit is set in the PIIX3
	 * ISA bridge, so we have to go out and find it.
	 */
	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
		pci_read_config_byte(d, 0x82, &dlc);
		if (!(dlc & 1<<1)) {
			pci_info(d, "PIIX3: Enabling Passive Release\n");
			dlc |= 1<<1;
			pci_write_config_byte(d, 0x82, dlc);
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
244
245
246
247
248
249
250
251
252
/*
 * Chipsets whose ISA DMA can hang the machine: set the global
 * isa_dma_bridge_buggy flag once so DMA users can apply workarounds.
 */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
	if (!isa_dma_bridge_buggy) {
		isa_dma_bridge_buggy = 1;
		pci_info(dev, "Activating ISA DMA hang workarounds\n");
	}
}
/*
 * Its not totally clear which chipsets are the problematic ones.  We
 * know 82C586 and 82C596 variants are affected.
 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
271
272
273
274
275
/*
 * Intel TigerPoint LPC: read PMBASE from config 0x40, then clear the
 * BM_STS bit (0x10, write-one-to-clear) in the PM1a status port if the
 * BIOS left it set — it otherwise prevents entry to deeper C-states.
 */
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
	u32 pmbase;
	u16 pm1a;

	pci_read_config_dword(dev, 0x40, &pmbase);
	pmbase = pmbase & 0xff80;
	pm1a = inw(pmbase);

	if (pm1a & 0x10) {
		pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
		outw(0x10, pmbase);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
291
292
293static void quirk_nopcipci(struct pci_dev *dev)
294{
295 if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
296 pci_info(dev, "Disabling direct PCI/PCI transfers\n");
297 pci_pci_problems |= PCIPCI_FAIL;
298 }
299}
300DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
301DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
302
/*
 * AMD 8151 AGP bridge, silicon rev 0x13 only: set PCIAGP_FAIL in
 * pci_pci_problems to disable direct PCI/AGP transfers (chipset
 * erratum).
 */
static void quirk_nopciamd(struct pci_dev *dev)
{
	u8 rev;
	pci_read_config_byte(dev, 0x08, &rev);
	if (rev == 0x13) {
		/* Erratum 24 */
		pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
		pci_pci_problems |= PCIAGP_FAIL;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
314
315
316static void quirk_triton(struct pci_dev *dev)
317{
318 if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
319 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
320 pci_pci_problems |= PCIPCI_TRITON;
321 }
322}
323DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
324DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
325DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
326DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
327
328
329
330
331
332
333
334
335
336
337
/*
 * VIA northbridge latency workaround.  Only applied when the matching
 * buggy southbridge revision is present: a VT82C686 at rev 0x40-0x42,
 * or a VT8231 at rev 0x10-0x12.  If so, rewrite the bus arbitration
 * register 0x76 on the northbridge (clear bit 5, set bit 4) —
 * NOTE(review): bit meanings inferred from the write pattern; confirm
 * against the VIA datasheet / viahardware.com latency patch notes.
 */
static void quirk_vialatency(struct pci_dev *dev)
{
	struct pci_dev *p;
	u8 busarb;

	/*
	 * Ok, we have a potential problem chipset here.  Now see if we
	 * have a buggy southbridge.
	 */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
	if (p != NULL) {
		/*
		 * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A;
		 * thanks Dan Hollis.
		 * Check for buggy part revisions.
		 */
		if (p->revision < 0x40 || p->revision > 0x42)
			goto exit;
	} else {
		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
		if (p == NULL)	/* No problem parts */
			goto exit;

		/* Check for buggy part revisions */
		if (p->revision < 0x10 || p->revision > 0x12)
			goto exit;
	}

	/*
	 * Ok we have the problem. Now set the PCI master grant to occur
	 * every master grant. The apparent bug is that under high PCI load
	 * (quite common in Linux of course) you can get data loss when the
	 * CPU is held off the bus for 3 bus master requests.  This happens
	 * to include the IDE controllers....
	 */
	pci_read_config_byte(dev, 0x76, &busarb);
	/*
	 * Set bit 4 and bit 5 of byte 76 to 0x01
	 * "Master priority rotation on every PCI master grant"
	 */
	busarb &= ~(1<<5);
	busarb |= (1<<4);
	pci_write_config_byte(dev, 0x76, busarb);
	pci_info(dev, "Applying VIA southbridge workaround\n");
exit:
	pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
399
400
401static void quirk_viaetbf(struct pci_dev *dev)
402{
403 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
404 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
405 pci_pci_problems |= PCIPCI_VIAETBF;
406 }
407}
408DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
409
/*
 * VIA 82C576: set PCIPCI_VSFX once to limit direct PCI/PCI transfers.
 */
static void quirk_vsfx(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_VSFX;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
418
419
420
421
422
423
/*
 * ALi Magik: set both PCIPCI_ALIMAGIK and PCIPCI_TRITON, limiting
 * direct PCI/PCI transfers on these chipsets.
 */
static void quirk_alimagik(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
433
434
/*
 * Natoma (440FX/LX/BX family) host bridges: set PCIPCI_NATOMA once to
 * limit direct PCI/PCI transfers.
 */
static void quirk_natoma(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_NATOMA;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
448
449
450
451
452
/*
 * IBM Citrine: limit config space to 0xA0 bytes — reads past that
 * offset misbehave on this device.
 */
static void quirk_citrine(struct pci_dev *dev)
{
	dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
458
459
460
461
462
/*
 * Netronome NFP4000/NFP5000/NFP6000 (and NFP6000 VF): cap config space
 * at 0x600 bytes to avoid accessing regions beyond what the device
 * implements correctly.
 */
static void quirk_nfp6000(struct pci_dev *dev)
{
	dev->cfg_size = 0x600;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
471
472
/*
 * IBM 0x034a: grow every memory BAR smaller than a page to PAGE_SIZE
 * and mark it IORESOURCE_UNSET so it gets reassigned; prevents sharing
 * a page between BARs of different devices.
 */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
			r->end = PAGE_SIZE - 1;
			r->start = 0;
			r->flags |= IORESOURCE_UNSET;
			pci_info(dev, "expanded BAR %d to page size: %pR\n",
				 i, r);
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
490
491
492
493
494
/*
 * S3 868/968: force BAR 0 to a 64MB-aligned 64MB window (0..0x3ffffff)
 * and mark it unset so the core reassigns it; the card decodes the
 * full aligned region regardless of what the BAR reports.
 */
static void quirk_s3_64M(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];

	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
		r->flags |= IORESOURCE_UNSET;
		r->start = 0;
		r->end = 0x3ffffff;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
507
/*
 * Rebuild BAR @pos of @dev as a fixed I/O resource of @size bytes,
 * reading the raw BAR from config space directly (for devices whose
 * header lies about the BAR size).  Does nothing if the BAR reads 0.
 */
static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
		     const char *name)
{
	u32 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + pos;

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);

	if (!region)
		return;

	res->name = pci_name(dev);
	res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
	res->flags |=
		(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
	region &= ~(size - 1);

	/* Convert from PCI bus to resource space */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
		 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
}
534
535
536
537
538
539
540
541
542
543
/*
 * AMD CS5536 ISA bridge reports wrong BAR sizes in its header.  If
 * BAR 0 doesn't read back as 8 bytes, rebuild BARs 0-2 with their real
 * sizes (8, 256 and 64 bytes) via quirk_io().
 */
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
	static char *name = "CS5536 ISA bridge";

	if (pci_resource_len(dev, 0) != 8) {
		quirk_io(dev, 0,   8, name);	/* SMB */
		quirk_io(dev, 1, 256, name);	/* GPIO */
		quirk_io(dev, 2,  64, name);	/* MFGPT */
		pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
			 name);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
557
/*
 * Read a 16-bit I/O base from config offset @port, align it down to
 * @size, and claim it as bridge resource @nr under @name.  Used for
 * hidden ACPI/SMBus regions that no BAR describes.  No-op if the
 * register reads back as zero after alignment.
 */
static void quirk_io_region(struct pci_dev *dev, int port,
			    unsigned size, int nr, const char *name)
{
	u16 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + nr;

	pci_read_config_word(dev, port, &region);
	region &= ~(size - 1);

	if (!region)
		return;

	res->name = pci_name(dev);
	res->flags = IORESOURCE_IO;

	/* Convert from PCI bus to resource space */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	if (!pci_claim_resource(dev, nr))
		pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
}
582
583
584
585
586
/*
 * ATI RS100 northbridge: reserve the legacy VGA I/O ports it decodes
 * (0x3b0-0x3bb and 0x3d3) so nothing else claims them.
 */
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
	pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	/* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */
	request_region(0x3b0, 0x0C, "RadeonIGP");
	request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
595
596
597
598
599
600
601
602
603
604
605
606
/*
 * AMD NL USB controller: override the reported class to
 * PCI_CLASS_SERIAL_USB_DEVICE so the dwc3 driver binds instead of xhci.
 */
static void quirk_amd_nl_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Use "USB Device (not host controller)" class */
	pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
	pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
		quirk_amd_nl_class);
618
619
620
621
622
623
624
625
/*
 * Synopsys HAPS USB3 prototyping cards enumerate with the xHCI class.
 * For the known HAPSUSB3 device IDs, rewrite the class to
 * PCI_CLASS_SERIAL_USB_DEVICE so the dwc3 driver claims them.
 */
static void quirk_synopsys_haps(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	switch (pdev->device) {
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
		pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
		pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
			 class, pdev->class);
		break;
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
			       PCI_CLASS_SERIAL_USB_XHCI, 0,
			       quirk_synopsys_haps);
643
644
645
646
647
648
649
650
651
652
653
/*
 * ALi M7101: claim the hidden ACPI (config 0xE0, 64 bytes) and SMBus
 * (config 0xE2, 32 bytes) I/O regions as bridge resources.
 */
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
660
/*
 * Decode and report a PIIX4 programmable I/O "device resource" window.
 * The dword at @port holds the base in bits 15:0 and a 4-bit mask in
 * bits 19:16; the window is active only if all @enable bits are set.
 * The loop shrinks @size until it finds the power-of-two window the
 * mask allows, then the base is aligned down to that size.
 */
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;
	mask = (devres >> 16) & 15;
	base = devres & 0xffff;
	size = 16;
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it, but let's get enough confirmation reports first.
	 */
	base &= -size;
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
}
686
/*
 * Memory-window counterpart of piix4_io_quirk(): base lives in bits
 * 31:16, a 6-bit mask in bits 5:0 (shifted up by 16), maximum window
 * 128 * 64KB.  Only reports the decoded range; nothing is reserved.
 */
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;
	base = devres & 0xffff0000;
	mask = (devres & 0x3f) << 16;
	size = 128 << 16;
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it, but let's get enough confirmation reports first.
	 */
	base &= -size;
	pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
}
712
713
714
715
716
717
718
/*
 * PIIX4 ACPI function: claim its hidden ACPI (0x40, 64 bytes) and
 * SMBus (0x90, 16 bytes) I/O regions, then report the programmable
 * "device resource" windows B..J.  Resources E/F and G/H are only
 * decoded when the corresponding enable bits (29 / 30) in resource A
 * (config 0x5c) are set.
 */
static void quirk_piix4_acpi(struct pci_dev *dev)
{
	u32 res_a;

	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");

	/* Device resource A has enables for some of the other ones */
	pci_read_config_dword(dev, 0x5c, &res_a);

	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);

	/*
	 * Device resource D is just bitfields for static resources;
	 * skip it.
	 */

	/* Device 12 enabled? */
	if (res_a & (1 << 29)) {
		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
	}
	/* Device 13 enabled? */
	if (res_a & (1 << 30)) {
		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
	}
	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
749
750#define ICH_PMBASE 0x40
751#define ICH_ACPI_CNTL 0x44
752#define ICH4_ACPI_EN 0x10
753#define ICH6_ACPI_EN 0x80
754#define ICH4_GPIOBASE 0x58
755#define ICH4_GPIO_CNTL 0x5c
756#define ICH4_GPIO_EN 0x10
757#define ICH6_GPIOBASE 0x48
758#define ICH6_GPIO_CNTL 0x4c
759#define ICH6_GPIO_EN 0x10
760
761
762
763
764
765
/*
 * ICH4-generation LPC bridges: if the ACPI and/or GPIO decode-enable
 * bits are set, claim the corresponding hidden I/O regions
 * (PMBASE: 128 bytes, GPIOBASE: 64 bytes) as bridge resources so the
 * resource allocator keeps other devices out of them.
 */
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
	u8 enable;

	/*
	 * The check for PCIBIOS_MIN_IO is to ensure we won't create a
	 * conflict on machines that don't reserve the low I/O ports;
	 * we only claim regions the chipset actually decodes.
	 */
	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH4_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH4 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
	if (enable & ICH4_GPIO_EN)
		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
797
/*
 * ICH6+ variant of the ACPI/GPIO region claim: same structure as
 * quirk_ich4_lpc_acpi() but with the ICH6 enable bits and GPIO
 * base/control register offsets.
 */
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
	u8 enable;

	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH6_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH6 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
	if (enable & ICH6_GPIO_EN)
		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH6 GPIO");
}
812
/*
 * Report an ICH6 LPC generic I/O decode range.  The register at @reg
 * has an enable bit in bit 0 and the base address in bits 15:2.
 * @dynsize selects the decode-size model for the register.
 */
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name, int dynsize)
{
	u32 val;
	u32 size, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;
	base = val & 0xfffc;
	if (dynsize) {
		/*
		 * This is not correct. It is 16, 32 or 64 bytes
		 * depending on register D31:F0:ADh bits 5:4.
		 *
		 * But this gets us at least _part_ of it.
		 */
		size = 16;
	} else {
		size = 128;
	}
	base &= ~(size-1);

	/* Just print it out for now. We should reserve it after more debugging. */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
844
/*
 * ICH6 LPC bridge: claim ACPI/GPIO regions, then report the two LPC
 * generic I/O decode windows (config 0x84 fixed-size, 0x88 dynamic).
 */
static void quirk_ich6_lpc(struct pci_dev *dev)
{
	/* Shared ACPI/GPIO decode with all ICH6+ parts */
	ich6_lpc_acpi_gpio(dev);

	/* ICH6-specific generic IO decode */
	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
856
/*
 * Report an ICH7+ LPC generic I/O decode range.  Bit 0 enables the
 * window; base is in bits 15:2 and a decode mask in bits 23:18
 * (bits 1:0 of the mask are always decoded, hence "mask |= 3").
 */
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name)
{
	u32 val;
	u32 mask, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;

	/* IO base in bits 15:2, mask in bits 23:18, both are dword-based */
	base = val & 0xfffc;
	mask = (val >> 16) & 0xfc;
	mask |= 3;

	/* Just print it out for now. We should reserve it after more debugging. */
	pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
880
881
/* ICH7-10 has the same common LPC generic IO decode registers */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
	/* We share the common ACPI/GPIO decode with ICH6 */
	ich6_lpc_acpi_gpio(dev);

	/* And have 4 ICH7+ generic decodes */
	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
906
907
908
909
910
/*
 * VIA ACPI: on revisions with bit 4 of the revision set, claim the
 * 256-byte ACPI I/O region whose base lives at config 0x48.
 */
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
	if (dev->revision & 0x10)
		quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
				"vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
918
919
920
921
922
923
924
/*
 * VT82C686 ACPI: everything the 586 has, plus a hardware-monitor
 * region (base at config 0x70, 128 bytes) and an SMBus region (base at
 * config 0x90, 16 bytes).
 */
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
	quirk_vt82c586_acpi(dev);

	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
				"vt82c686 HW-mon");

	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
935
936
937
938
939
940
/*
 * VT8235 ISA bridge: claim the power-management region (base at config
 * 0x88, 128 bytes) and SMBus region (base at config 0xd0, 16 bytes).
 */
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
947
948
949
950
951
/*
 * TI XIO2000a PCIe-PCI bridge: fast back-to-back transfers on the
 * secondary bus are broken, so clear PCI_COMMAND_FAST_BACK on every
 * device behind the bridge.
 */
static void quirk_xio2000a(struct pci_dev *dev)
{
	struct pci_dev *pdev;
	u16 command;

	pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
		pci_read_config_word(pdev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_FAST_BACK)
			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
			quirk_xio2000a);
966
967#ifdef CONFIG_X86_IO_APIC
968
969#include <asm/io_apic.h>
970
971
972
973
974
975
976
977
/*
 * VIA 82C686: program external APIC routing in config register 0x58 —
 * 0x1f (route to all CPUs) when I/O APICs are in use, 0 to disable it
 * when none are present.  Re-run on early resume to restore the value.
 */
static void quirk_via_ioapic(struct pci_dev *dev)
{
	u8 tmp;

	if (nr_ioapics < 1)
		tmp = 0;	/* nothing routed to external APIC */
	else
		tmp = 0x1f;	/* all known bits (4-0) routed to external APIC */

	pci_info(dev, "%sbling VIA external APIC routing\n",
		 tmp == 0 ? "Disa" : "Ena");

	/* Offset 0x58: External APIC IRQ output control */
	pci_write_config_byte(dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
995
996
997
998
999
1000
1001
/*
 * VIA 8237: set the "bypass APIC de-assert message" bit (bit 3 of
 * config register 0x5B) if not already set, working around missing
 * IRQ de-assert messages from this southbridge.
 */
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
	u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8

	pci_read_config_byte(dev, 0x5B, &misc_control2);
	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
		pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
/*
 * AMD Viper 7410, revisions >= 0x02: warn that AMD Erratum #22 may be
 * present and suggest booting with "noapic" if the system is unstable.
 */
static void quirk_amd_ioapic(struct pci_dev *dev)
{
	if (dev->revision >= 0x02) {
		pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
		pci_warn(dev, "        : booting with the \"noapic\" option\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410,	quirk_amd_ioapic);
1033#endif
1034
1035#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
1036
/*
 * Cavium 0xa018 with subsystem 0xa118 (RNM): point the SR-IOV "link"
 * at the device's own devfn so its VFs are associated with it.
 */
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
	/* RNM device doesn't have a standard SR-IOV dependency link */
	if (dev->subsystem_device == 0xa118)
		dev->sriov->link = dev->devfn;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
1044#endif
1045
1046
1047
1048
1049
/*
 * AMD 8131 bridge, silicon revisions <= 0x12: mark the secondary bus
 * with PCI_BUS_FLAGS_NO_MMRBC so the PCI-X maximum memory read byte
 * count is never raised behind it.
 */
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
	if (dev->subordinate && dev->revision <= 0x12) {
		pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
			 dev->revision);
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
1059
1060
1061
1062
1063
1064
1065
1066
/*
 * VIA ACPI functions: the SCI interrupt line lives in the low nibble of
 * config register 0x42; adopt it as d->irq when it is set and not IRQ 2.
 */
static void quirk_via_acpi(struct pci_dev *d)
{
	u8 irq;

	/* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
	pci_read_config_byte(d, 0x42, &irq);
	irq &= 0xf;
	if (irq && (irq != 2))
		d->irq = irq;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
1079
1080
1081static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
1082
/*
 * Record the range of device slots on bus 0 that sit on the VIA VLink
 * (internal) bus, based on which southbridge is present.  The range is
 * consumed by quirk_via_vlink() to decide which devices get the IRQ
 * line rewrite.  via_vlink_dev_hi defaults to 18 and is only narrowed
 * for the 82C686, where all integrated functions share its own slot.
 */
static void quirk_via_bridge(struct pci_dev *dev)
{
	/* See what bridge we have and find the device ranges */
	switch (dev->device) {
	case PCI_DEVICE_ID_VIA_82C686:
		/*
		 * The VT82C686 is special; it attaches to PCI and can be
		 * found at slot numbers other than the usual ones -- both
		 * limits are set to its own slot.
		 */
		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
		break;
	case PCI_DEVICE_ID_VIA_8237:
	case PCI_DEVICE_ID_VIA_8237A:
		via_vlink_dev_lo = 15;
		break;
	case PCI_DEVICE_ID_VIA_8235:
		via_vlink_dev_lo = 16;
		break;
	case PCI_DEVICE_ID_VIA_8231:
	case PCI_DEVICE_ID_VIA_8233_0:
	case PCI_DEVICE_ID_VIA_8233A:
	case PCI_DEVICE_ID_VIA_8233C_0:
		via_vlink_dev_lo = 17;
		break;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
/*
 * VIA VLink devices on bus 0 must have their PCI_INTERRUPT_LINE
 * register kept in sync with dev->irq (legacy IRQs 1-15 only); write
 * it back when it differs.  Only runs if quirk_via_bridge() detected a
 * VIA southbridge (via_vlink_dev_lo != -1), and only for devices whose
 * slot falls within the recorded VLink slot range.
 */
static void quirk_via_vlink(struct pci_dev *dev)
{
	u8 irq, new_irq;

	/* Check if we have a VIA VLink bridge at all */
	if (via_vlink_dev_lo == -1)
		return;

	new_irq = dev->irq;

	/* Don't quirk interrupts outside the legacy IRQ range */
	if (!new_irq || new_irq > 15)
		return;

	/* Internal device on bus 0, within the VLink slot window? */
	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
		return;

	/*
	 * This is an internal VLink device on a PIC interrupt. The BIOS
	 * ought to have set this but may not have, so we redo it.
	 */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	if (new_irq != irq) {
		pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
			 irq, new_irq);
		udelay(15);	/* unknown if delay really needed */
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
1163
1164
1165
1166
1167
1168
/*
 * VT82C598: writing 0 to config register 0xfc changes which device ID
 * the chip reports (it can masquerade as a VT82C597); re-read the ID
 * afterwards so dev->device reflects the real part.  NOTE(review):
 * 0xfc semantics inferred from the write/re-read pattern — confirm
 * against the VIA datasheet.
 */
static void quirk_vt82c598_id(struct pci_dev *dev)
{
	pci_write_config_byte(dev, 0xfc, 0);
	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
1175
1176
1177
1178
1179
1180
1181
/*
 * CardBus controllers: zero the legacy-mode base register
 * (PCI_CB_LEGACY_MODE_BASE) so legacy I/O-port mode is disabled; also
 * redone on early resume since the register is lost across suspend.
 */
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
1190
1191
1192
1193
1194
1195
1196
1197
/*
 * quirk_amd_ordering - enable PCI-compliance bits the BIOS may leave off
 * on the AMD FE Gate 700C northbridge.
 *
 * Sets bits 1 and 2 of config register 0x4C and, as a companion, bit 23
 * of register 0x84.  NOTE(review): register semantics are inferred from
 * the warning text below -- confirm against the AMD-751 datasheet.
 */
static void quirk_amd_ordering(struct pci_dev *dev)
{
	u32 pcic;
	pci_read_config_dword(dev, 0x4C, &pcic);
	if ((pcic & 6) != 6) {
		pcic |= 6;	/* both compliance bits must be set */
		pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
		pci_write_config_dword(dev, 0x4C, pcic);
		pci_read_config_dword(dev, 0x84, &pcic);
		pcic |= (1 << 23);	/* companion bit in register 0x84 */
		pci_write_config_dword(dev, 0x84, pcic);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1213
1214
1215
1216
1217
1218
1219
1220
1221static void quirk_dunord(struct pci_dev *dev)
1222{
1223 struct resource *r = &dev->resource[1];
1224
1225 r->flags |= IORESOURCE_UNSET;
1226 r->start = 0;
1227 r->end = 0xffffff;
1228}
1229DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1230
1231
1232
1233
1234
1235
/*
 * quirk_transparent_bridge - mark bridges that the PCI core should treat
 * as transparent (child resources may be satisfied by the parent bus).
 */
static void quirk_transparent_bridge(struct pci_dev *dev)
{
	dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1242
1243
1244
1245
1246
1247
1248
1249static void quirk_mediagx_master(struct pci_dev *dev)
1250{
1251 u8 reg;
1252
1253 pci_read_config_byte(dev, 0x41, ®);
1254 if (reg & 2) {
1255 reg &= ~2;
1256 pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1257 reg);
1258 pci_write_config_byte(dev, 0x41, reg);
1259 }
1260}
1261DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1262DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1263
1264
1265
1266
1267
1268
1269static void quirk_disable_pxb(struct pci_dev *pdev)
1270{
1271 u16 config;
1272
1273 if (pdev->revision != 0x04)
1274 return;
1275 pci_read_config_word(pdev, 0x40, &config);
1276 if (config & (1<<6)) {
1277 config &= ~(1<<6);
1278 pci_write_config_word(pdev, 0x40, config);
1279 pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
1280 }
1281}
1282DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1283DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1284
/*
 * quirk_amd_ide_mode - switch AMD/ATI SATA controllers from IDE to AHCI.
 *
 * If the device reports subclass IDE (0x01), unlock class-code writes via
 * bit 0 of config register 0x40, program prog-if (offset 0x9) = 0x01 and
 * subclass (offset 0xa) = 0x06 (SATA/AHCI), then restore the lock bit and
 * update the cached dev->class.  The write ordering is significant: the
 * class registers are only writable while the unlock bit is set.
 */
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
	/* set SB600/SB700 SATA in IDE mode to AHCI mode */
	u8 tmp;

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
	if (tmp == 0x01) {	/* currently in IDE subclass */
		pci_read_config_byte(pdev, 0x40, &tmp);
		pci_write_config_byte(pdev, 0x40, tmp|1);	/* unlock class writes */
		pci_write_config_byte(pdev, 0x9, 1);		/* prog-if: AHCI */
		pci_write_config_byte(pdev, 0xa, 6);		/* subclass: SATA */
		pci_write_config_byte(pdev, 0x40, tmp);		/* restore lock */

		pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
		pci_info(pdev, "set SATA to AHCI mode\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1310
1311
/*
 * quirk_svwks_csb5ide - force both channels of the ServerWorks CSB5 IDE
 * out of native mode by clearing prog-if bits 0 and 2, keeping the cached
 * dev->class consistent with the hardware register.
 */
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
	u8 prog;
	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
	if (prog & 5) {
		prog &= ~5;		/* clear both native-mode bits */
		pdev->class &= ~5;	/* mirror the change in the cache */
		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);

	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1324
1325
1326static void quirk_ide_samemode(struct pci_dev *pdev)
1327{
1328 u8 prog;
1329
1330 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1331
1332 if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
1333 pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
1334 prog &= ~5;
1335 pdev->class &= ~5;
1336 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1337 }
1338}
1339DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1340
1341
/*
 * quirk_no_ata_d3 - keep the PCI core from putting these IDE controllers
 * into D3 by setting PCI_DEV_FLAGS_NO_D3.
 */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
/* Legacy IDE class only (prog-if masked off by class_shift 8) */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* ALi IDE controllers */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* VIA IDE controllers */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1358
1359
1360
1361
1362
/*
 * quirk_eisa_bridge - override the class of the Intel 82375 so it is
 * recognized as an EISA bridge.  NOTE(review): implies the device reports
 * a different class by default -- confirm against the 82375 datasheet.
 */
static void quirk_eisa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
/* Set when the host bridge identifies a board whose BIOS hides the SMBus */
static int asus_hides_smbus;

/*
 * asus_hides_smbus_hostbridge - detect boards whose BIOS hides the Intel
 * SMBus controller.
 *
 * Matches on (host-bridge device ID, subsystem vendor, subsystem device)
 * for known ASUS, HP, Samsung and Compaq boards and latches the result in
 * asus_hides_smbus so the LPC fixups below can un-hide the device.
 *
 * Note: the switch statements intentionally contain no break statements;
 * every listed subsystem ID falls through to the single
 * asus_hides_smbus = 1 assignment.
 */
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
			switch (dev->subsystem_device) {
			case 0x8025:
			case 0x8070:
			case 0x8088:
			case 0x1626:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
			switch (dev->subsystem_device) {
			case 0x80b1:
			case 0x80b2:
			case 0x8093:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
			switch (dev->subsystem_device) {
			case 0x8030:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
			switch (dev->subsystem_device) {
			case 0x8070:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
			switch (dev->subsystem_device) {
			case 0x80c9:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
			switch (dev->subsystem_device) {
			case 0x1751:
			case 0x1821:
			case 0x1897:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x184b:
			case 0x186a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x80f2:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
			switch (dev->subsystem_device) {
			case 0x1882:
			case 0x1977:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x088C:
			case 0x0890:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x12bc:
			case 0x12bd:
			case 0x006a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
			switch (dev->subsystem_device) {
			case 0x12bf:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0xC00C:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x0058:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
			switch (dev->subsystem_device) {
			case 0xB16C:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
			switch (dev->subsystem_device) {
			case 0x00b8:
			case 0x00b9:
			case 0x00ba:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
			switch (dev->subsystem_device) {
			case 0x001A:
				asus_hides_smbus = 1;
			}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1528
/*
 * asus_hides_smbus_lpc - un-hide the i801 SMBus device on boards flagged
 * by asus_hides_smbus_hostbridge().
 *
 * Bit 3 of LPC config word 0xF2 hides the SMBus function; clear it, then
 * read the register back to verify the change stuck.
 */
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
	u16 val;

	if (likely(!asus_hides_smbus))
		return;

	pci_read_config_word(dev, 0xF2, &val);
	if (val & 0x8) {
		pci_write_config_word(dev, 0xF2, val & (~0x8));
		pci_read_config_word(dev, 0xF2, &val);
		if (val & 0x8)	/* write did not stick */
			pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
				val);
		else
			pci_info(dev, "Enabled i801 SMBus device\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1561
1562
/* Mapped RCBA (Root Complex Base Address) space, valid between suspend and resume */
static void __iomem *asus_rcba_base;

/*
 * On ICH6 the SMBus hide bit lives in memory-mapped RCBA space rather
 * than config space.  At suspend time, read the RCBA from LPC config
 * dword 0xF0 and map the 16 KB region so the early-resume fixup below
 * can modify it before drivers run.
 */
static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
	u32 rcba;

	if (likely(!asus_hides_smbus))
		return;
	WARN_ON(asus_rcba_base);	/* must not already be mapped */

	pci_read_config_dword(dev, 0xF0, &rcba);
	/* use bits 31:14, 16 kB aligned */
	asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
	if (asus_rcba_base == NULL)
		return;
}
1578
/*
 * Early-resume half of the ICH6 fixup: clear the SMBus hide bit in the
 * RCBA space mapped by the suspend fixup above.
 */
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
	u32 val;

	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	/* read the Function Disable register, dword mode only */
	val = readl(asus_rcba_base + 0x3418);

	/* enable the SMBus device by clearing bit 3 */
	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
}
1592
/*
 * Resume half of the ICH6 fixup: tear down the RCBA mapping created at
 * suspend and report success.
 */
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	iounmap(asus_rcba_base);
	asus_rcba_base = NULL;	/* ready for the next suspend cycle */
	pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
}
1602
/*
 * Boot-time (HEADER) variant: run the full suspend / resume-early /
 * resume sequence in one go to un-hide the SMBus device at probe time.
 */
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
	asus_hides_smbus_lpc_ich6_suspend(dev);
	asus_hides_smbus_lpc_ich6_resume_early(dev);
	asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1613
1614
1615static void quirk_sis_96x_smbus(struct pci_dev *dev)
1616{
1617 u8 val = 0;
1618 pci_read_config_byte(dev, 0x77, &val);
1619 if (val & 0x10) {
1620 pci_info(dev, "Enabling SiS 96x SMBus\n");
1621 pci_write_config_byte(dev, 0x77, val & ~0x10);
1622 }
1623}
1624DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1625DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1626DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1627DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1628DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1629DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1630DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1631DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641#define SIS_DETECT_REGISTER 0x40
1642
1643static void quirk_sis_503(struct pci_dev *dev)
1644{
1645 u8 reg;
1646 u16 devid;
1647
1648 pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®);
1649 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1650 pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1651 if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
1652 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1653 return;
1654 }
1655
1656
1657
1658
1659
1660
1661 dev->device = devid;
1662 quirk_sis_96x_smbus(dev);
1663}
1664DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1665DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1666
1667
1668
1669
1670
1671
1672
1673static void asus_hides_ac97_lpc(struct pci_dev *dev)
1674{
1675 u8 val;
1676 int asus_hides_ac97 = 0;
1677
1678 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1679 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1680 asus_hides_ac97 = 1;
1681 }
1682
1683 if (!asus_hides_ac97)
1684 return;
1685
1686 pci_read_config_byte(dev, 0x50, &val);
1687 if (val & 0xc0) {
1688 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1689 pci_read_config_byte(dev, 0x50, &val);
1690 if (val & 0xc0)
1691 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1692 val);
1693 else
1694 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1695 }
1696}
1697DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1698DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1699
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
 * quirk_jmicron_ata - program JMicron controllers into the function
 * layout libata expects (AHCI on function 0, IDE on function 1 where
 * applicable).  Must run early so the additional function shows up
 * during PCI scanning.  Control bits live in config dwords 0x40 and 0x80.
 */
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
	u32 conf1, conf5, class;
	u8 hdr;

	/* Only poke function 0; the layout is programmed chip-wide */
	if (PCI_FUNC(pdev->devfn))
		return;

	pci_read_config_dword(pdev, 0x40, &conf1);
	pci_read_config_dword(pdev, 0x80, &conf5);

	conf1 &= ~0x00CFF302;	/* clear bits 1, 8, 9, 12-19, 22, 23 */
	conf5 &= ~(1 << 24);	/* clear bit 24 */

	switch (pdev->device) {
	case PCI_DEVICE_ID_JMICRON_JMB360:
	case PCI_DEVICE_ID_JMICRON_JMB362:
	case PCI_DEVICE_ID_JMICRON_JMB364:
		/* SATA-only parts: single-function AHCI mode */
		conf1 |= 0x0002A100;	/* set bits 8, 13, 15, 17 */
		break;

	case PCI_DEVICE_ID_JMICRON_JMB365:
	case PCI_DEVICE_ID_JMICRON_JMB366:
		/* dual-PATA parts: redirect the second PATA port */
		conf5 |= (1 << 24);
		fallthrough;
	case PCI_DEVICE_ID_JMICRON_JMB361:
	case PCI_DEVICE_ID_JMICRON_JMB363:
	case PCI_DEVICE_ID_JMICRON_JMB369:
		/* dual-function mode: AHCI on fn 0, IDE on fn 1 */
		conf1 |= 0x00C2A1B3;	/* set bits 0,1,4,5,7,8,13,15,17,22,23 */
		break;

	case PCI_DEVICE_ID_JMICRON_JMB368:
		/* PATA-only part: single-function IDE mode */
		conf1 |= 0x00C00000;	/* set bits 22, 23 */
		break;
	}

	pci_write_config_dword(pdev, 0x40, conf1);
	pci_write_config_dword(pdev, 0x80, conf5);

	/* Re-read header type and class: the writes above may change them */
	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
	pdev->hdr_type = hdr & 0x7f;
	pdev->multifunction = !!(hdr & 0x80);

	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
	pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);

#endif
1779
/*
 * quirk_jmicron_async_suspend - disable async suspend for multi-function
 * JMicron parts; the message below documents the power-on ordering issue
 * between the functions.
 */
static void quirk_jmicron_async_suspend(struct pci_dev *dev)
{
	if (dev->multifunction) {
		device_disable_async_suspend(&dev->dev);
		pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
1791
#ifdef CONFIG_X86_IO_APIC
/*
 * quirk_alder_ioapic - resource fixup for the EESSC "other" function
 * (class 0xff00) that carries the IOAPIC.
 */
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
	int i;

	if ((pdev->class >> 8) != 0xff00)
		return;

	/*
	 * The first BAR is the location of the IOAPIC; forcibly insert it
	 * into the resource tree so nothing else gets placed there.
	 */
	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
		insert_resource(&iomem_resource, &pdev->resource[0]);

	/*
	 * The remaining standard BARs are cleared out.
	 * NOTE(review): presumably they contain bogus values on this part
	 * -- confirm against the original changelog.
	 */
	for (i = 1; i < PCI_STD_NUM_BARS; i++)
		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
1817
/*
 * quirk_pcie_mch - disable MSI on chipsets where it is known not to work
 * by setting the per-device no_msi flag.
 */
static void quirk_pcie_mch(struct pci_dev *pdev)
{
	pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);

DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1827
1828
1829
1830
1831
/*
 * quirk_pcie_pxh - disable MSI on Intel PXH/PXHD/PXHV parts; the warning
 * text records that the SHPC device's MSI is the problem.
 */
static void quirk_pcie_pxh(struct pci_dev *dev)
{
	dev->no_msi = 1;
	pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
1842
1843
1844
1845
1846
/*
 * quirk_intel_pcie_pm - power-management fixup for a range of Intel PCIe
 * ports: raise the global D3hot transition delay to 120 ms and disable
 * D1/D2 on the device.
 */
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
	pci_pm_d3hot_delay = 120;	/* global: affects all devices */
	dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1873
1874static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
1875{
1876 if (dev->d3hot_delay >= delay)
1877 return;
1878
1879 dev->d3hot_delay = delay;
1880 pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
1881 dev->d3hot_delay);
1882}
1883
/*
 * quirk_radeon_pm - Apple-built Radeon (subsystem vendor Apple, subsystem
 * device 0x00e2) needs at least 20 ms after D3hot power-on.
 */
static void quirk_radeon_pm(struct pci_dev *dev)
{
	if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
	    dev->subsystem_device == 0x00e2)
		quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
/*
 * quirk_ryzen_xhci_d3hot - AMD Ryzen XHCI controllers (0x15e0/0x15e1)
 * need at least 20 ms after D3hot power-on.
 */
static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
{
	quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
1907
#ifdef CONFIG_X86_IO_APIC
/* DMI callback: globally disable boot-interrupt rerouting */
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
	noioapicreroute = 1;
	pr_info("%s detected: disable boot interrupt reroute\n", d->ident);

	return 0;
}

/* Boards on which boot-interrupt rerouting must not be applied */
static const struct dmi_system_id boot_interrupt_dmi_table[] = {
	{
		.callback = dmi_disable_ioapicreroute,
		.ident = "ASUSTek Computer INC. M2N-LR",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
		},
	},
	{}
};
1931
1932
1933
1934
1935
1936
1937
/*
 * quirk_reroute_to_boot_interrupts_intel - mark Intel bridges whose
 * interrupts should be rerouted to the boot interrupt lines, unless
 * disabled on the command line (noioapicquirk/noioapicreroute) or by the
 * DMI table above.
 */
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
	dmi_check_system(boot_interrupt_dmi_table);
	if (noioapicquirk || noioapicreroute)
		return;

	dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
	pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
		dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
#define INTEL_6300_IOAPIC_ABAR		0x40	/* 6300ESB: APIC base/control word */
#define INTEL_6300_DISABLE_BOOT_IRQ	(1<<14)

#define INTEL_CIPINTRC_CFG_OFFSET	0x14C	/* Xeon: Coherent Interface Protocol Interrupt Control */
#define INTEL_CIPINTRC_DIS_INTX_ICH	(1<<25)

/*
 * quirk_disable_intel_boot_interrupt - turn off the legacy boot interrupt
 * generation.  The 6300ESB (ESB_10) uses bit 14 of the IOAPIC ABAR word;
 * the listed Xeon bridge IDs use bit 25 of the CIPINTRC dword.  Honors
 * the noioapicquirk command-line override.
 */
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;
	u32 pci_config_dword;

	if (noioapicquirk)
		return;

	switch (dev->device) {
	case PCI_DEVICE_ID_INTEL_ESB_10:
		pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
				&pci_config_word);
		pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
		pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
				pci_config_word);
		break;
	case 0x3c28:	/* Xeon E5 1600/2600/4600	*/
	case 0x0e28:	/* Xeon E5/E7 V2		*/
	case 0x2f28:	/* Xeon E5/E7 V3, V4		*/
	case 0x6f28:	/* Xeon D-1500			*/
	case 0x2034:	/* Xeon Scalable Family		*/
		pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
				&pci_config_dword);
		pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
		pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
				pci_config_dword);
		break;
	default:
		return;
	}
	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		dev->vendor, dev->device);
}
/* 6300ESB southbridge */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
	quirk_disable_intel_boot_interrupt);
/* Xeon bridge devices using the CIPINTRC register */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
	quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
	quirk_disable_intel_boot_interrupt);
2059
2060
#define BC_HT1000_FEATURE_REG		0x64
#define BC_HT1000_PIC_REGS_ENABLE	(1<<0)
#define BC_HT1000_MAP_IDX		0xC00	/* I/O port: map index */
#define BC_HT1000_MAP_DATA		0xC01	/* I/O port: map data */

/*
 * quirk_disable_broadcom_boot_interrupt - disable boot interrupts on the
 * ServerWorks/Broadcom HT1000 south bridge.
 *
 * Temporarily enables access to the PIC mapping registers (bit 0 of the
 * feature register), zeroes the 32 IRQ map entries starting at index
 * 0x10 through the 0xC00/0xC01 index/data port pair, then restores the
 * feature register to its original value.
 */
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;
	u8 irq;

	if (noioapicquirk)
		return;

	pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
			BC_HT1000_PIC_REGS_ENABLE);

	for (irq = 0x10; irq < 0x10 + 32; irq++) {
		outb(irq, BC_HT1000_MAP_IDX);
		outb(0x00, BC_HT1000_MAP_DATA);
	}

	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
2090
2091
2092
2093
2094
2095
2096
2097
#define AMD_813X_MISC			0x40
#define AMD_813X_NOIOAMODE		(1<<0)
#define AMD_813X_REV_B1			0x12
#define AMD_813X_REV_B2			0x13

/*
 * quirk_disable_amd_813x_boot_interrupt - clear the NOIOAMODE bit in the
 * misc register of AMD 8131/8132 bridges to disable boot interrupts.
 * Revisions B1 and B2 are skipped (not affected or must not be touched
 * -- NOTE(review): confirm which against the original changelog).
 */
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;

	if (noioapicquirk)
		return;
	if ((dev->revision == AMD_813X_REV_B1) ||
	    (dev->revision == AMD_813X_REV_B2))
		return;

	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
	pci_config_dword &= ~AMD_813X_NOIOAMODE;
	pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2124
/*
 * Disable boot interrupts on the AMD 8111 SMBus device by zeroing its
 * PCI IRQ routing register.  A zero register means routing is already
 * off, so only log in that case.
 */
#define AMD_8111_PCI_IRQ_ROUTING 0x56

static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;

	if (noioapicquirk)
		return;

	pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
	if (!pci_config_word) {
		pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
			 dev->vendor, dev->device);
		return;
	}
	pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2146#endif
2147
2148
2149
2150
2151
2152
2153static void quirk_tc86c001_ide(struct pci_dev *dev)
2154{
2155 struct resource *r = &dev->resource[0];
2156
2157 if (r->start & 0x8) {
2158 r->flags |= IORESOURCE_UNSET;
2159 r->start = 0;
2160 r->end = 0xf;
2161 }
2162}
2163DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
2164 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
2165 quirk_tc86c001_ide);
2166
2167
2168
2169
2170
2171
2172
2173
2174static void quirk_plx_pci9050(struct pci_dev *dev)
2175{
2176 unsigned int bar;
2177
2178
2179 if (dev->revision >= 2)
2180 return;
2181 for (bar = 0; bar <= 1; bar++)
2182 if (pci_resource_len(dev, bar) == 0x80 &&
2183 (pci_resource_start(dev, bar) & 0x80)) {
2184 struct resource *r = &dev->resource[bar];
2185 pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
2186 bar);
2187 r->flags |= IORESOURCE_UNSET;
2188 r->start = 0;
2189 r->end = 0xff;
2190 }
2191}
2192DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2193 quirk_plx_pci9050);
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
2204DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
2205
/*
 * Netmos multi-port serial/parallel cards encode their port counts in
 * the subsystem device ID: bits 7:4 = number of parallel ports,
 * bits 3:0 = number of serial ports.  When parallel ports are present,
 * rewrite the class from COMMUNICATION_SERIAL to COMMUNICATION_OTHER
 * so that parport_serial (not a plain serial driver) claims the card.
 */
static void quirk_netmos(struct pci_dev *dev)
{
	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
	unsigned int num_serial = dev->subsystem_device & 0xf;

	switch (dev->device) {
	case PCI_DEVICE_ID_NETMOS_9835:
		/* IBM 0x0299 subsystem: leave as-is -- presumably this
		 * variant does not follow the encoding above; confirm. */
		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
		    dev->subsystem_device == 0x0299)
			return;
		fallthrough;
	case PCI_DEVICE_ID_NETMOS_9735:
	case PCI_DEVICE_ID_NETMOS_9745:
	case PCI_DEVICE_ID_NETMOS_9845:
	case PCI_DEVICE_ID_NETMOS_9855:
		if (num_parallel) {
			pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
				 dev->device, num_parallel, num_serial);
			/* Keep the low byte (prog-if), replace base/sub class */
			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
			    (dev->class & 0xff);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
			       PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
2242
/*
 * Some firmware hands off Intel e100 NICs with their interrupt output
 * enabled, which can flood a shared IRQ line before a driver binds.
 * If the chip's CSR is reachable, mask its interrupt.
 */
static void quirk_e100_interrupt(struct pci_dev *dev)
{
	u16 command, pmcsr;
	u8 __iomem *csr;
	u8 cmd_hi;

	switch (dev->device) {
	/* e100 device IDs (see the e100 network driver) */
	case 0x1029:
	case 0x1030 ... 0x1034:
	case 0x1038 ... 0x103E:
	case 0x1050 ... 0x1057:
	case 0x1059:
	case 0x1064 ... 0x106B:
	case 0x1091 ... 0x1095:
	case 0x1209:
	case 0x1229:
	case 0x2449:
	case 0x2459:
	case 0x245D:
	case 0x27DC:
		break;
	default:
		return;
	}

	/*
	 * Only proceed if memory decoding is on and BAR 0 is assigned --
	 * otherwise the CSR cannot be read anyway.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &command);

	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
		return;

	/*
	 * The device must be in D0: accessing MMIO registers of a device
	 * in a low-power state is not meaningful.
	 */
	if (dev->pm_cap) {
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
			return;
	}

	/* Map just the first 8 bytes of the CSR */
	csr = ioremap(pci_resource_start(dev, 0), 8);
	if (!csr) {
		pci_warn(dev, "Can't map e100 registers\n");
		return;
	}

	/* CSR byte 3: 0 means interrupts enabled; write 1 to mask them */
	cmd_hi = readb(csr + 3);
	if (cmd_hi == 0) {
		pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
		writeb(1, csr + 3);
	}

	iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
2308
2309
2310
2311
2312
/*
 * Disable ASPM L0s on the Intel Ethernet devices listed below.
 * NOTE(review): presumably these parts have broken L0s behaviour --
 * confirm against the relevant Intel NIC errata.
 */
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
	pci_info(dev, "Disabling L0s\n");
	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2332
/*
 * Disable both ASPM L0s and L1.  Registered for the ASMedia 0x1080
 * PCIe-to-PCI bridge -- NOTE(review): presumably ASPM causes errors on
 * that bridge; confirm rationale.
 */
static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
{
	pci_info(dev, "Disabling ASPM L0s/L1\n");
	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
2345
2346
2347
2348
2349
2350
2351
2352
2353
/*
 * Set the clear_retrain_link flag so the PCI core explicitly clears
 * the Retrain Link bit after link retraining on these Pericom bridges
 * (flag is consumed by the link-retrain path in the PCI core).
 */
static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
{
	dev->clear_retrain_link = 1;
	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
2362
2363static void fixup_rev1_53c810(struct pci_dev *dev)
2364{
2365 u32 class = dev->class;
2366
2367
2368
2369
2370
2371 if (class)
2372 return;
2373
2374 dev->class = PCI_CLASS_STORAGE_SCSI << 8;
2375 pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
2376 class, dev->class);
2377}
2378DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
2379
2380
2381static void quirk_p64h2_1k_io(struct pci_dev *dev)
2382{
2383 u16 en1k;
2384
2385 pci_read_config_word(dev, 0x40, &en1k);
2386
2387 if (en1k & 0x200) {
2388 pci_info(dev, "Enable I/O Space to 1KB granularity\n");
2389 dev->io_window_1k = 1;
2390 }
2391}
2392DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2393
2394
2395
2396
2397
2398
/*
 * nVidia CK804 PCIe: bit 5 of config byte 0xf41 must be set for the
 * AER extended capability to be linked into the capability chain.
 * Set it if clear; re-applied early at resume via RESUME_EARLY.
 */
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
	uint8_t b;

	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
		if (!(b & 0x20)) {
			pci_write_config_byte(dev, 0xf41, b | 0x20);
			pci_info(dev, "Linking AER extended capability\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			       quirk_nvidia_ck804_pcie_aer_ext_cap);
2414
/*
 * VIA CX700 (0x324e): disable PCI bus parking (reg 0x76 bit 6) and PCI
 * master read caching (regs 0x72/0x75/0x77), but only when a *second*
 * VT6212L USB controller is present in the system.  NOTE(review): the
 * CX700 apparently contains an internal controller with the same ID,
 * so one match is always expected -- confirm.
 */
static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
	/* First search: picks up the (internal) VT6212L, if any */
	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
		PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
	uint8_t b;

	/*
	 * Search again starting after the first hit; pci_get_device()
	 * drops the reference on 'p' itself.  Bail out unless a second
	 * VT6212L exists.
	 */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
	if (!p)
		return;
	pci_dev_put(p);

	/* Reg 0x76 bit 6: PCI bus parking enable -- clear it if set */
	if (pci_read_config_byte(dev, 0x76, &b) == 0) {
		if (b & 0x40) {
			/* Turn off PCI Bus Parking */
			pci_write_config_byte(dev, 0x76, b ^ 0x40);

			pci_info(dev, "Disabling VIA CX700 PCI parking\n");
		}
	}

	/* Reg 0x72 non-zero means master read caching is active */
	if (pci_read_config_byte(dev, 0x72, &b) == 0) {
		if (b != 0) {
			/* Turn off PCI Master read caching */
			pci_write_config_byte(dev, 0x72, 0x0);

			/* Set PCI Master Bus time-out to "1x16 PCLK" */
			pci_write_config_byte(dev, 0x75, 0x1);

			/* Disable "Read FIFO Timer" */
			pci_write_config_byte(dev, 0x77, 0x0);

			pci_info(dev, "Disabling VIA CX700 PCI caching\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2466
2467static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2468{
2469 u32 rev;
2470
2471 pci_read_config_dword(dev, 0xf4, &rev);
2472
2473
2474 if (rev == 0x05719000) {
2475 int readrq = pcie_get_readrq(dev);
2476 if (readrq > 2048)
2477 pcie_set_readrq(dev, 2048);
2478 }
2479}
2480DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2481 PCI_DEVICE_ID_TIGON3_5719,
2482 quirk_brcm_5719_limit_mrrs);
2483
2484
2485
2486
2487
2488
2489
2490static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2491{
2492 u8 reg;
2493
2494 if (pci_read_config_byte(dev, 0xF4, ®) == 0 && !(reg & 0x02)) {
2495 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2496 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2497 }
2498}
2499DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2500 quirk_unhide_mch_dev6);
2501DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2502 quirk_unhide_mch_dev6);
2503
2504#ifdef CONFIG_PCI_MSI
2505
2506
2507
2508
2509
2510
2511
2512static void quirk_disable_all_msi(struct pci_dev *dev)
2513{
2514 pci_no_msi();
2515 pci_warn(dev, "MSI quirk detected; MSI disabled\n");
2516}
2517DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
2518DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
2519DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
2520DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
2521DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
2522DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
2523DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
2524DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
2525DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SAMSUNG, 0xa5e3, quirk_disable_all_msi);
2526
2527
2528static void quirk_disable_msi(struct pci_dev *dev)
2529{
2530 if (dev->subordinate) {
2531 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2532 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2533 }
2534}
2535DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2536DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2537DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2538
2539
2540
2541
2542
2543
2544
2545static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
2546{
2547 struct pci_dev *apc_bridge;
2548
2549 apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
2550 if (apc_bridge) {
2551 if (apc_bridge->device == 0x9602)
2552 quirk_disable_msi(apc_bridge);
2553 pci_dev_put(apc_bridge);
2554 }
2555}
2556DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
2557DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2558
2559
2560
2561
2562
/*
 * Return non-zero if @dev's first readable HyperTransport MSI-mapping
 * capability has its enable bit set; 0 if none is found or it is
 * disabled.  ttl bounds the capability walk against malformed chains.
 */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Found %s HT MSI Mapping\n",
				 flags & HT_MSI_FLAGS_ENABLE ?
				 "enabled" : "disabled");
			return (flags & HT_MSI_FLAGS_ENABLE) != 0;
		}

		/* Flags unreadable: try the next mapping capability */
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return 0;
}
2584
2585
2586static void quirk_msi_ht_cap(struct pci_dev *dev)
2587{
2588 if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
2589 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2590 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2591 }
2592}
2593DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
2594 quirk_msi_ht_cap);
2595
2596
2597
2598
2599
/*
 * nVidia CK804: the HT MSI mapping may live either on this bridge or
 * on the function at devfn 0 of the same bus.  Only disable MSI on the
 * subordinate bus when neither carries an enabled mapping.
 */
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
	struct pci_dev *pdev;

	if (!dev->subordinate)
		return;

	/* Check the companion device at devfn 0 as well */
	pdev = pci_get_slot(dev->bus, 0);
	if (!pdev)
		return;
	if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
		pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
	}
	pci_dev_put(pdev);	/* drop the pci_get_slot() reference */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_msi_ht_cap);
2622
2623
/* Set the enable bit on every readable HT MSI-mapping capability of @dev */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Enabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags | HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
			 ht_enable_msi_mapping);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
			 ht_enable_msi_mapping);
2648
2649
2650
2651
2652
2653
2654static void nvenet_msi_disable(struct pci_dev *dev)
2655{
2656 const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
2657
2658 if (board_name &&
2659 (strstr(board_name, "P5N32-SLI PREMIUM") ||
2660 strstr(board_name, "P5N32-E SLI"))) {
2661 pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
2662 dev->no_msi = 1;
2663 }
2664}
2665DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2666 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2667 nvenet_msi_disable);
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
/*
 * Mark NVIDIA Tegra PCIe root ports as no_msi.  NOTE(review):
 * presumably the root ports themselves cannot signal port events via
 * MSI (endpoints behind them are unaffected) -- confirm rationale.
 */
static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
/*
 * nVidia MCP55 HT bridges: if bits 2 or 15 of config dword 0x74 are
 * set, legacy IRQ routing is misconfigured; clear them.  Only applies
 * to devices that expose an HT capability.
 */
static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
	u32 cfg;

	if (!pci_find_capability(dev, PCI_CAP_ID_HT))
		return;

	pci_read_config_dword(dev, 0x74, &cfg);

	if (cfg & ((1 << 2) | (1 << 15))) {
		pr_info("Rewriting IRQ routing register on MCP55\n");
		cfg &= ~((1 << 2) | (1 << 15));
		pci_write_config_dword(dev, 0x74, cfg);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
			nvbridge_check_legacy_irq_routing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
			nvbridge_check_legacy_irq_routing);
2753
/*
 * Inspect @dev's HT MSI-mapping capabilities.
 *
 * Return: 0 - no MSI-mapping capability present
 *         1 - capability present, but none enabled
 *         2 - at least one enabled MSI-mapping capability found
 */
static int ht_check_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;
	int found = 0;

	/* Walk all HT MSI mapping capabilities in config space */
	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (found < 1)
			found = 1;	/* at least one capability exists */
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			if (flags & HT_MSI_FLAGS_ENABLE) {
				if (found < 2) {
					found = 2;	/* enabled one found: done */
					break;
				}
			}
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}

	return found;
}
2781
/*
 * Check whether a leaf device with an HT MSI-mapping capability sits
 * in one of the slots after @host_bridge (same bus, function 0, up to
 * slot 0x1f).  The scan stops early at the next HT slave, i.e. the
 * next host bridge on the chain.  Returns 1 if such a leaf was found.
 */
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
	struct pci_dev *dev;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = host_bridge->devfn >> 3;
	for (i = dev_no + 1; i < 0x20; i++) {
		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
		if (!dev)
			continue;

		/* An HT slave marks the next host bridge: stop scanning */
		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			pci_dev_put(dev);
			break;
		}

		if (ht_check_msi_mapping(dev)) {
			found = 1;
			pci_dev_put(dev);
			break;
		}
		pci_dev_put(dev);
	}

	return found;
}
2812
#define PCI_HT_CAP_SLAVE_CTRL0 4	/* link control 0 offset in slave cap */
#define PCI_HT_CAP_SLAVE_CTRL1 8	/* link control 1 offset in slave cap */

/*
 * Return 1 if @dev is the last device on its HyperTransport chain.
 * Bit 10 of the capability flags selects which link-control word to
 * read; bit 6 of that word is tested -- presumably the HT "End of
 * Chain" bit; confirm against the HT link specification.
 */
static int is_end_of_ht_chain(struct pci_dev *dev)
{
	int pos, ctrl_off;
	int end = 0;
	u16 flags, ctrl;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);

	if (!pos)
		goto out;

	pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);

	ctrl_off = ((flags >> 10) & 1) ?
			PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
	pci_read_config_word(dev, pos + ctrl_off, &ctrl);

	if (ctrl & (1 << 6))
		end = 1;

out:
	return end;
}
2839
/*
 * Enable @dev's HT MSI mapping only when it is safe: walk downward
 * from dev's slot to find the HT slave ("host bridge") for this
 * segment, then skip the enable if that bridge already has an enabled
 * mapping, or if dev itself is an end-of-chain host bridge with a
 * mapped leaf behind it.
 */
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
	struct pci_dev *host_bridge;
	int pos;
	int i, dev_no;
	int found = 0;

	/* Search from dev's own slot down to slot 0 for an HT slave */
	dev_no = dev->devfn >> 3;
	for (i = dev_no; i >= 0; i--) {
		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
		if (!host_bridge)
			continue;

		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			found = 1;
			break;	/* keep the reference on host_bridge */
		}
		pci_dev_put(host_bridge);
	}

	if (!found)
		return;

	/* Don't enable an end-of-chain host bridge with a mapped leaf */
	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
	    host_bridge_with_leaf(host_bridge))
		goto out;

	/* The host bridge already provides an enabled mapping: done */
	if (msi_ht_cap_enabled(host_bridge))
		goto out;

	ht_enable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2878
/* Clear the enable bit on every readable HT MSI-mapping capability of @dev */
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Disabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags & ~HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
2898
/*
 * Common worker for the nVidia/ALi HT MSI-mapping quirks.  If the host
 * bridge at devfn 00.0 is an HT slave, enable the device's disabled
 * mapping (all mappings when @all is set, otherwise only when
 * nv_ht_enable_msi_mapping() deems it safe); if the host bridge is not
 * HT, disable the device's enabled mapping instead.
 */
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
	struct pci_dev *host_bridge;
	int pos;
	int found;

	if (!pci_msi_enabled())
		return;

	/* Check whether this device carries an HT MSI mapping at all */
	found = ht_check_msi_mapping(dev);

	/* No mapping capability: nothing to fix up */
	if (found == 0)
		return;

	/* Locate the host bridge at 00.0 in this device's domain */
	host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
						  PCI_DEVFN(0, 0));
	if (host_bridge == NULL) {
		pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
		return;
	}

	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
	if (pos != 0) {
		/* Host bridge is on HyperTransport */
		if (found == 1) {
			/* Mapping exists but is disabled: try to enable it */
			if (all)
				ht_enable_msi_mapping(dev);
			else
				nv_ht_enable_msi_mapping(dev);
		}
		goto out;
	}

	/* Non-HT host bridge, mapping already disabled: leave it */
	if (found == 1)
		goto out;

	/* Non-HT host bridge with an enabled mapping: disable it */
	ht_disable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2949
/* ALi bridges: apply the HT MSI quirk, enabling all mappings (all=1) */
static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
	return __nv_msi_ht_cap_quirk(dev, 1);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2956
/* nVidia devices: apply the HT MSI quirk, enabling only when safe (all=0) */
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
	return __nv_msi_ht_cap_quirk(dev, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2963
/* Flag devices on which setting INTx Disable also blocks MSI delivery */
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
2968
/*
 * ATI variant: the INTx-disable/MSI bug only exists when the system's
 * ATI SBX00 SMBus controller is revision 0x30..0x3A; check that
 * companion device before setting the flag.
 */
static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
	struct pci_dev *p;

	/* Find the SBX00 SMBus controller to read the southbridge revision */
	p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			   NULL);
	if (!p)
		return;

	if ((p->revision < 0x3B) && (p->revision >= 0x30))
		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
	pci_dev_put(p);	/* drop the pci_get_device() reference */
}
2987
2988static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
2989{
2990
2991 if (dev->revision < 0x18) {
2992 pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
2993 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2994 }
2995}
/* Broadcom Tigon3 5780/5714/5715 family */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715S,
			quirk_msi_intx_disable_bug);

/* ATI 0x4390-0x4394: gated on SBX00 SMBus revision (see _ati_bug) */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
			quirk_msi_intx_disable_ati_bug);

/* ATI 0x4373-0x4375: always flagged */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
			quirk_msi_intx_disable_bug);

/* Attansic/Atheros NICs; 0x1090 onward are revision-gated (see _qca_bug) */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
			quirk_msi_intx_disable_qca_bug);
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066static void quirk_al_msi_disable(struct pci_dev *dev)
3067{
3068 dev->no_msi = 1;
3069 pci_warn(dev, "Disabling MSI/MSI-X\n");
3070}
3071DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
3072 PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable);
3073#endif
3074
3075
3076
3077
3078
3079
3080
3081
/* Mark the HINT HB6 bridge (0x0020) as a hot-plug bridge */
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
	dev->is_hotplug_bridge = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113#ifdef CONFIG_MMC_RICOH_MMC
/*
 * Ricoh RL5C476 multifunction chips: disable the proprietary MMC
 * controller function so MMC cards are handled by the standard SDHCI
 * function instead.  The disable bit (0xB7 bit 1) lives in the CardBus
 * function's config space and is write-protected.
 */
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
	u8 write_enable;
	u8 write_target;
	u8 disable;

	/* The disable register is only accessible via function 0 */
	if (PCI_FUNC(dev->devfn))
		return;

	pci_read_config_byte(dev, 0xB7, &disable);
	if (disable & 0x02)
		return;		/* already disabled */

	/*
	 * Unlock sequence: 0x8E/0x8D select and enable writes to 0xB7 --
	 * presumably per the Ricoh datasheet; confirm register meanings.
	 */
	pci_read_config_byte(dev, 0x8E, &write_enable);
	pci_write_config_byte(dev, 0x8E, 0xAA);
	pci_read_config_byte(dev, 0x8D, &write_target);
	pci_write_config_byte(dev, 0x8D, 0xB7);
	pci_write_config_byte(dev, 0xB7, disable | 0x02);
	/* Restore the original lock state */
	pci_write_config_byte(dev, 0x8E, write_enable);
	pci_write_config_byte(dev, 0x8D, write_target);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
3145
/*
 * Ricoh R5C832/R5CE822/R5CE823: disable the proprietary MMC controller
 * function (reg 0xCB bit 1, unlocked via 0xCA) so the standard SDHCI
 * function handles MMC cards.  On R5CE822/823 also lower the SD base
 * clock to avoid card-recognition failures.
 */
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
	u8 write_enable;
	u8 disable;

	/* The disable register is only accessible via function 0 */
	if (PCI_FUNC(dev->devfn))
		return;

	/*
	 * R5CE822/823 readers fail to recognize certain SD/MMC cards;
	 * lowering the base clock (write 0x32 to reg 0xe1) fixes this.
	 * 0xf9 and 0xfc appear to be key/unlock registers for 0x150 and
	 * 0xe1 respectively -- confirm against the Ricoh datasheet.
	 */
	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
		pci_write_config_byte(dev, 0xf9, 0xfc);
		pci_write_config_byte(dev, 0x150, 0x10);
		pci_write_config_byte(dev, 0xf9, 0x00);
		pci_write_config_byte(dev, 0xfc, 0x01);
		pci_write_config_byte(dev, 0xe1, 0x32);
		pci_write_config_byte(dev, 0xfc, 0x00);

		pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n");
	}

	pci_read_config_byte(dev, 0xCB, &disable);

	if (disable & 0x02)
		return;		/* already disabled */

	/* Unlock 0xCB via 0xCA, set the disable bit, restore the lock */
	pci_read_config_byte(dev, 0xCA, &write_enable);
	pci_write_config_byte(dev, 0xCA, 0x57);
	pci_write_config_byte(dev, 0xCB, disable | 0x02);
	pci_write_config_byte(dev, 0xCA, write_enable);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
3202#endif
3203
3204#ifdef CONFIG_DMAR_TABLE
3205#define VTUNCERRMSK_REG 0x1ac
3206#define VTD_MSK_SPEC_ERRORS (1 << 31)
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217static void vtd_mask_spec_errors(struct pci_dev *dev)
3218{
3219 u32 word;
3220
3221 pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
3222 pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
3223}
3224DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
3225DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
3226#endif
3227
3228static void fixup_ti816x_class(struct pci_dev *dev)
3229{
3230 u32 class = dev->class;
3231
3232
3233 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
3234 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
3235 class, dev->class);
3236}
3237DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
3238 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
3239
3240
3241
3242
3243
/*
 * Solarflare SFC4000: cap the supported Max Payload Size at 256 bytes.
 * pcie_mpss holds the encoded MPS-supported value (128 << 1 == 256).
 */
static void fixup_mpss_256(struct pci_dev *dev)
{
	dev->pcie_mpss = 1;	/* encoded: 128 << 1 = 256 bytes */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263static void quirk_intel_mc_errata(struct pci_dev *dev)
3264{
3265 int err;
3266 u16 rcc;
3267
3268 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
3269 pcie_bus_config == PCIE_BUS_DEFAULT)
3270 return;
3271
3272
3273
3274
3275
3276
3277 err = pci_read_config_word(dev, 0x48, &rcc);
3278 if (err) {
3279 pci_err(dev, "Error attempting to read the read completion coalescing register\n");
3280 return;
3281 }
3282
3283 if (!(rcc & (1 << 10)))
3284 return;
3285
3286 rcc &= ~(1 << 10);
3287
3288 err = pci_write_config_word(dev, 0x48, rcc);
3289 if (err) {
3290 pci_err(dev, "Error attempting to write the read completion coalescing register\n");
3291 return;
3292 }
3293
3294 pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
3295}
3296
/* Intel 5000-series memory controller hubs affected by the erratum */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);

/* Intel 5100-series memory controller hubs affected by the erratum */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3323
3324
3325
3326
3327
3328
/*
 * Intel NTB: the sizes of BAR 2 and BAR 4 are advertised in config bytes
 * 0xD0/0xD1 as log2 of the size; fix up the resource end addresses to
 * match the advertised sizes.
 */
static void quirk_intel_ntb(struct pci_dev *dev)
{
	int rc;
	u8 val;

	/* Config byte 0xD0 holds log2(size) of BAR 2 */
	rc = pci_read_config_byte(dev, 0x00D0, &val);
	if (rc)
		return;

	dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;

	/* Config byte 0xD1 holds log2(size) of BAR 4 */
	rc = pci_read_config_byte(dev, 0x00D1, &val);
	if (rc)
		return;

	/*
	 * NOTE(review): val >= 64 would make the shift undefined; presumably
	 * the hardware only reports sane exponents -- verify if unsure.
	 */
	dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
#define I915_DEIER_REG 0x4400c	/* display engine interrupt enable */
/*
 * Some BIOSes hand over an Intel GPU with display-engine interrupts still
 * enabled.  Map BAR 0 and, if DEIER is non-zero, warn and clear it so the
 * device cannot raise interrupts before a driver takes over.
 */
static void disable_igfx_irq(struct pci_dev *dev)
{
	void __iomem *regs = pci_iomap(dev, 0, 0);
	if (regs == NULL) {
		pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
		return;
	}

	/* Any non-zero DEIER means the BIOS left interrupts enabled */
	if (readl(regs + I915_DEIER_REG) != 0) {
		pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");

		writel(0, regs + I915_DEIER_REG);
	}

	pci_iounmap(dev, regs);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3386
3387
3388
3389
3390
/*
 * Devices that reliably transition to D3hot without the default settle
 * delay: clear d3hot_delay so power management does not wait.
 */
static void quirk_remove_d3hot_delay(struct pci_dev *dev)
{
	dev->d3hot_delay = 0;
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay);
/* Intel 0x8cxx devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay);
/* Intel 0x22xx devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay);
3421
3422
3423
3424
3425
3426
/*
 * Mark devices whose INTx masking (PCI_COMMAND interrupt-disable bit /
 * interrupt status) does not work, so the PCI core never relies on it.
 */
static void quirk_broken_intx_masking(struct pci_dev *dev)
{
	dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004,
			quirk_broken_intx_masking);

/* Realtek device with broken INTx masking */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
			quirk_broken_intx_masking);

/* Intel network devices with broken INTx masking */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
3467
/* Mellanox devices whose INTx masking is unconditionally broken */
static u16 mellanox_broken_intx_devs[] = {
	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
};

/* ConnectX-4 firmware minor-version window checked below */
#define CONNECTX_4_CURR_MAX_MINOR 99
#define CONNECTX_4_INTX_SUPPORT_MINOR 14
3487
3488
3489
3490
3491
3492
3493
3494static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
3495{
3496 __be32 __iomem *fw_ver;
3497 u16 fw_major;
3498 u16 fw_minor;
3499 u16 fw_subminor;
3500 u32 fw_maj_min;
3501 u32 fw_sub_min;
3502 int i;
3503
3504 for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
3505 if (pdev->device == mellanox_broken_intx_devs[i]) {
3506 pdev->broken_intx_masking = 1;
3507 return;
3508 }
3509 }
3510
3511
3512
3513
3514
3515 if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
3516 return;
3517
3518 if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
3519 pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
3520 return;
3521
3522
3523 if (pci_enable_device_mem(pdev)) {
3524 pci_warn(pdev, "Can't enable device memory\n");
3525 return;
3526 }
3527
3528 fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
3529 if (!fw_ver) {
3530 pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
3531 goto out;
3532 }
3533
3534
3535 fw_maj_min = ioread32be(fw_ver);
3536 fw_sub_min = ioread32be(fw_ver + 1);
3537 fw_major = fw_maj_min & 0xffff;
3538 fw_minor = fw_maj_min >> 16;
3539 fw_subminor = fw_sub_min & 0xffff;
3540 if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
3541 fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
3542 pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
3543 fw_major, fw_minor, fw_subminor, pdev->device ==
3544 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
3545 pdev->broken_intx_masking = 1;
3546 }
3547
3548 iounmap(fw_ver);
3549
3550out:
3551 pci_disable_device(pdev);
3552}
3553DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
3554 mellanox_check_broken_intx_masking);
3555
/*
 * Flag the device so the PCI core never performs a secondary bus reset
 * on the bus it sits on.
 */
static void quirk_no_bus_reset(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
}

/* Atheros devices that do not tolerate a bus reset */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);

/* Cavium device that does not tolerate a bus reset */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
3580
/*
 * Flag devices for which a D3hot->D0 transition does not perform a real
 * reset, so the core does not advertise PM reset for them.  Devices on the
 * root bus are left alone.
 */
static void quirk_no_pm_reset(struct pci_dev *dev)
{
	/* Only flag devices that are NOT on the root bus */
	if (!pci_is_root_bus(dev->bus))
		dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
}

/* All ATI/AMD VGA-class devices (any device ID) */
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3601
3602
3603
3604
3605
3606
3607static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
3608{
3609 if (pdev->is_hotplug_bridge &&
3610 (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
3611 pdev->revision <= 1))
3612 pdev->no_msi = 1;
3613}
3614DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
3615 quirk_thunderbolt_hotplug_msi);
3616DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
3617 quirk_thunderbolt_hotplug_msi);
3618DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
3619 quirk_thunderbolt_hotplug_msi);
3620DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
3621 quirk_thunderbolt_hotplug_msi);
3622DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
3623 quirk_thunderbolt_hotplug_msi);
3624
3625#ifdef CONFIG_ACPI
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
/*
 * On Apple machines, power off the Thunderbolt controller on late suspend
 * by evaluating a chain of ACPI methods (SXIO/SXFP/SXLV) found below the
 * controller's upstream bridge.  Runs only for upstream switch ports that
 * have an ACPI companion.
 */
static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
	acpi_handle bridge, SXIO, SXFP, SXLV;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
		return;
	bridge = ACPI_HANDLE(&dev->dev);
	if (!bridge)
		return;

	/*
	 * All three ACPI methods must exist (under DSB0.NHI0 relative to the
	 * bridge handle) or we do nothing.
	 */
	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
		return;
	pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");

	/*
	 * Power-down sequence: the ordering and the 300 ms delay matter.
	 * NOTE(review): SXLV is deliberately evaluated twice around SXIO=0
	 * in the original code -- preserved as-is.
	 */
	acpi_execute_simple_method(SXIO, NULL, 1);
	acpi_execute_simple_method(SXFP, NULL, 0);
	msleep(300);
	acpi_execute_simple_method(SXLV, NULL, 0);
	acpi_execute_simple_method(SXIO, NULL, 0);
	acpi_execute_simple_method(SXLV, NULL, 0);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_poweroff_thunderbolt);
3677#endif
3678
3679
3680
3681
3682
3683
/*
 * Device-specific reset for the Intel 82599 SFP virtual function: a plain
 * FLR is sufficient.  A probe call only reports that the method exists.
 */
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
	if (probe)
		return 0;

	pcie_flr(dev);
	return 0;
}
3698
/* Ivy Bridge IGD MMIO register offsets used by reset_ivb_igd() */
#define SOUTH_CHICKEN2 0xc2004
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define MSG_CTL 0x45010
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000	/* milliseconds */

/*
 * Device-specific reset for Ivy Bridge integrated graphics: drive the
 * panel-power control off via MMIO, poll the status register until the
 * relevant bits clear (up to 10 s), then write the power-state register.
 */
static int reset_ivb_igd(struct pci_dev *dev, int probe)
{
	void __iomem *mmio_base;
	unsigned long timeout;
	u32 val;

	/* Probe call: just report that this reset method is available */
	if (probe)
		return 0;

	mmio_base = pci_iomap(dev, 0, 0);
	if (!mmio_base)
		return -ENOMEM;

	iowrite32(0x00000002, mmio_base + MSG_CTL);

	/* Unlock, then clear the power-enable bit in PCH_PP_CONTROL */
	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);

	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
	iowrite32(val, mmio_base + PCH_PP_CONTROL);

	/* Poll PCH_PP_STATUS until bits 0xb0000000 clear or we time out */
	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
	do {
		val = ioread32(mmio_base + PCH_PP_STATUS);
		if ((val & 0xb0000000) == 0)
			goto reset_complete;
		msleep(10);
	} while (time_before(jiffies, timeout));
	pci_warn(dev, "timeout during reset\n");

reset_complete:
	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);

	pci_iounmap(dev, mmio_base);
	return 0;
}
3747
3748
/*
 * Device-specific reset for Chelsio adapters (device IDs 0x4xxx): an FLR
 * with the device's bus-master and MSI-X state temporarily adjusted, and
 * config space saved/restored around the reset.
 */
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
	u16 old_command;
	u16 msix_flags;

	/* Only device IDs of the form 0x4xxx are handled by this reset */
	if ((dev->device & 0xf000) != 0x4000)
		return -ENOTTY;

	/* Probe call: just report that this reset method applies */
	if (probe)
		return 0;

	/*
	 * Enable bus mastering for the duration of the reset; the original
	 * command word is restored at the end.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &old_command);
	pci_write_config_word(dev, PCI_COMMAND,
			      old_command | PCI_COMMAND_MASTER);

	/* Snapshot config space so it can be restored after the FLR */
	pci_save_state(dev);

	/*
	 * If MSI-X is currently disabled, enable it with all vectors masked
	 * for the duration of the reset.
	 */
	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
				      msix_flags |
				      PCI_MSIX_FLAGS_ENABLE |
				      PCI_MSIX_FLAGS_MASKALL);

	pcie_flr(dev);

	/* Undo the temporary changes made above */
	pci_restore_state(dev);
	pci_write_config_word(dev, PCI_COMMAND, old_command);
	return 0;
}
3809
/* Device IDs used in the device-specific reset table below */
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
/*
 * Device-specific reset for NVMe controllers: cleanly disable the
 * controller (clear CC.EN) and wait for CSTS.RDY to clear before issuing
 * an FLR.
 */
static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
{
	void __iomem *bar;
	u16 cmd;
	u32 cfg;

	/* Need an NVMe-class device that supports FLR and has a BAR 0 */
	if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
	    !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Map registers up to and including CC */
	bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
	if (!bar)
		return -ENOTTY;

	/* Memory decoding must be on for the MMIO accesses below */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);

	cfg = readl(bar + NVME_REG_CC);

	/* Controller is enabled: disable it and wait for ready to clear */
	if (cfg & NVME_CC_ENABLE) {
		u32 cap = readl(bar + NVME_REG_CAP);
		unsigned long timeout;

		/*
		 * Clear any shutdown-notification bits along with enable so
		 * this is a plain disable, not a shutdown.
		 */
		cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);

		writel(cfg, bar + NVME_REG_CC);

		/*
		 * Allow CAP.TO (in 500 ms units, hence "* HZ / 2") for
		 * CSTS.RDY to clear after the disable.
		 */
		timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

		for (;;) {
			u32 status = readl(bar + NVME_REG_CSTS);

			/* Ready bit cleared: controller is now disabled */
			if (!(status & NVME_CSTS_RDY))
				break;

			msleep(100);

			if (time_after(jiffies, timeout)) {
				pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
				break;
			}
		}
	}

	pci_iounmap(dev, bar);

	pcie_flr(dev);

	return 0;
}
3894
3895
3896
3897
3898
3899
3900
/*
 * FLR followed by a 250 ms settle delay, for devices that need extra time
 * before responding after the reset.
 */
static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
{
	if (!pcie_has_flr(dev))
		return -ENOTTY;

	/* Probe call: just report availability */
	if (probe)
		return 0;

	pcie_flr(dev);

	msleep(250);

	return 0;
}
3915
/*
 * Device-specific reset methods, matched by vendor/device in
 * pci_dev_specific_reset().  PCI_ANY_ID entries act as per-vendor
 * catch-alls; the zero entry terminates the table.
 */
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
		 reset_intel_82599_sfp_virtfn },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
		reset_chelsio_generic_dev },
	{ 0 }
};
3929
3930
3931
3932
3933
3934
3935int pci_dev_specific_reset(struct pci_dev *dev, int probe)
3936{
3937 const struct pci_dev_reset_methods *i;
3938
3939 for (i = pci_dev_reset_methods; i->reset; i++) {
3940 if ((i->vendor == dev->vendor ||
3941 i->vendor == (u16)PCI_ANY_ID) &&
3942 (i->device == dev->device ||
3943 i->device == (u16)PCI_ANY_ID))
3944 return i->reset(dev, probe);
3945 }
3946
3947 return -ENOTTY;
3948}
3949
3950static void quirk_dma_func0_alias(struct pci_dev *dev)
3951{
3952 if (PCI_FUNC(dev->devfn) != 0)
3953 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
3954}
3955
3956
3957
3958
3959
3960
3961DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
3962DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
3963
/*
 * Some devices issue DMA with the requester ID of function 1; give every
 * other function an alias to devfn (slot, 1).
 */
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 1)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}

/* Marvell SATA controllers that DMA as function 1 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
			 quirk_dma_func1_alias);
/* JMicron JMB388 ESD */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(0x1c28,
			 0x0122,
			 quirk_dma_func1_alias);
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
/*
 * Devices needing a fixed DMA alias, matched on vendor/device/subsystem;
 * driver_data holds the alias devfn to add.
 */
static const struct pci_device_id fixed_dma_alias_tbl[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bb),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bc),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ 0 }
};
4048
4049static void quirk_fixed_dma_alias(struct pci_dev *dev)
4050{
4051 const struct pci_device_id *id;
4052
4053 id = pci_match_id(fixed_dma_alias_tbl, dev);
4054 if (id)
4055 pci_add_dma_alias(dev, id->driver_data, 1);
4056}
4057DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
/*
 * Flag conventional-PCI bridges sitting directly below a PCIe device (that
 * is not itself a PCIe-to-PCI bridge) so DMA alias handling attributes
 * their traffic to the upstream PCIe bridge.  Root-bus and PCIe devices
 * are skipped.
 */
static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
{
	if (!pci_is_root_bus(pdev->bus) &&
	    pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
	    pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
		pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
}

/* ASMedia 1080 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
			 quirk_use_pcie_bridge_dma_alias);
/* Tundra 8113 */
DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
/* ITE 8892 */
DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
/* ITE 8893 */
DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);
/* Intel 82801 (244e) */
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
4087
4088
4089
4090
4091
4092
4093
/*
 * Intel MIC x200 NTB forwards DMA using several fixed requester IDs; add
 * aliases for devfns 10.0, 11.0 and 12.3 so the IOMMU accepts them.
 */
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117static void quirk_pex_vca_alias(struct pci_dev *pdev)
4118{
4119 const unsigned int num_pci_slots = 0x20;
4120 unsigned int slot;
4121
4122 for (slot = 0; slot < num_pci_slots; slot++)
4123 pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
4124}
4125DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
4126DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
4127DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
4128DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
4129DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
4130DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
4131
4132
4133
4134
4135
4136
/*
 * Cavium ThunderX2 PCIe root: mark the bridge so DMA requester IDs are
 * translated at the root (PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT).
 */
static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
			 quirk_bridge_cavm_thrx2_pcie_root);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
			 quirk_bridge_cavm_thrx2_pcie_root);
4145
4146
4147
4148
4149
/*
 * TW686x video capture cards enumerate with class PCI_CLASS_NOT_DEFINED;
 * reclassify them as "other multimedia device" with prog-if 0x01.
 */
static void quirk_tw686x_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Keep programming interface 0x01 while overriding the class */
	pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
	pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
4167
4168
4169
4170
4171
4172
/*
 * Forbid the use of Relaxed Ordering for TLPs directed at this device, to
 * work around PCIe completion errata on the listed root complexes.
 */
static void quirk_relaxedordering_disable(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
	pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
}

/* Intel root ports 0x6f01-0x6f0e affected by the erratum */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
/* Intel root ports 0x2f01-0x2f0e affected by the erratum */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
/* AMD host bridges 0x1a00-0x1a02 affected by a similar erratum */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
/*
 * Clear the Relaxed Ordering and No Snoop enable bits in the Device
 * Control register of @pdev's Root Port, so completions toward the device
 * avoid a PCIe completion erratum.  Warns when no Root Port is found.
 */
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
	struct pci_dev *root_port = pcie_find_root_port(pdev);

	if (!root_port) {
		pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
		return;
	}

	pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
		 dev_name(&pdev->dev));
	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_RELAX_EN |
					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
}
4295
4296
4297
4298
4299
4300static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
4301{
4302
4303
4304
4305
4306
4307
4308 if ((pdev->device & 0xff00) == 0x5400)
4309 quirk_disable_root_port_attributes(pdev);
4310}
4311DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
4312 quirk_chelsio_T5_disable_root_port_attributes);
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
4326{
4327 if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
4328 return 1;
4329 return 0;
4330}
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
/*
 * ACS quirk for AMD southbridge multifunction devices: when the firmware
 * publishes an IVRS table (i.e. an AMD IOMMU is described), the listed
 * redirect controls are effectively provided.  Without ACPI support the
 * quirk never applies.
 */
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
#ifdef CONFIG_ACPI
	struct acpi_table_header *header = NULL;
	acpi_status status;

	/* Only multifunction devices directly on the root bus qualify */
	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
		return -ENODEV;

	/* IVRS table present means an AMD IOMMU is described by firmware */
	status = acpi_get_table("IVRS", 0, &header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_put_table(header);

	/* Filter out flags not applicable to multifunction endpoints */
	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);

	return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
	return -ENODEV;
#endif
}
4382
4383static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
4384{
4385 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4386 return false;
4387
4388 switch (dev->device) {
4389
4390
4391
4392
4393 case 0xa000 ... 0xa7ff:
4394 case 0xaf84:
4395 case 0xb884:
4396 return true;
4397 default:
4398 return false;
4399 }
4400}
4401
/*
 * ACS quirk for Cavium Root Ports: matched devices provide isolation
 * equivalent to source validation, request/completion redirect and
 * upstream forwarding even though they expose no ACS capability.
 */
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (!pci_quirk_cavium_acs_match(dev))
		return -ENOTTY;

	/* Treat SV/RR/CR/UF as effectively enabled for matched ports */
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4418
/*
 * ACS quirk for X-Gene ports: SV/RR/CR/UF are treated as effectively
 * enabled even though no ACS capability is exposed.
 */
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4429
4430
4431
4432
4433
4434
/*
 * ACS quirk for Zhaoxin Root/Downstream Ports: ports in the listed
 * device-ID ranges provide isolation equivalent to SV/RR/CR/UF without an
 * ACS capability.
 */
static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (!pci_is_pcie(dev) ||
	    ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
		return -ENOTTY;

	switch (dev->device) {
	case 0x0710 ... 0x071e:
	case 0x0721:
	case 0x0723 ... 0x0732:
		return pci_acs_ctrl_enabled(acs_flags,
			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
	}

	/*
	 * NOTE(review): unmatched Zhaoxin ports return false (0, "ACS not
	 * enabled") rather than -ENOTTY ("no quirk") -- preserved as-is.
	 */
	return false;
}
4452
4453
4454
4455
4456
4457
4458
/*
 * Root-port device IDs of Intel PCH generations covered by the PCH ACS
 * quirk.  Group names below follow the usual PCH code names for these ID
 * ranges — NOTE(review): names inferred from the ID ranges; confirm
 * against Intel PCH datasheets.
 */
static const u16 pci_quirk_intel_pch_acs_ids[] = {
	/* Ibexpeak PCH */
	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
	/* Cougarpoint PCH */
	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
	/* Pantherpoint PCH */
	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
	/* Lynxpoint-H PCH */
	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
	/* Lynxpoint-LP PCH */
	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
	/* Wildcat PCH */
	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
	/* Patsburg (X79) PCH */
	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
	/* Wellsburg (X99) PCH */
	0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
	0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
	/* 9-series PCH (even IDs only) */
	0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
4486
4487static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4488{
4489 int i;
4490
4491
4492 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4493 return false;
4494
4495 for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
4496 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4497 return true;
4498
4499 return false;
4500}
4501
4502static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
4503{
4504 if (!pci_quirk_intel_pch_acs_match(dev))
4505 return -ENOTTY;
4506
4507 if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
4508 return pci_acs_ctrl_enabled(acs_flags,
4509 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4510
4511 return pci_acs_ctrl_enabled(acs_flags, 0);
4512}
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4525{
4526 return pci_acs_ctrl_enabled(acs_flags,
4527 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4528}
4529
4530static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4531{
4532 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4533 return -ENOTTY;
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4544
4545 return acs_flags ? 0 : 1;
4546}
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4594{
4595 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4596 return false;
4597
4598 switch (dev->device) {
4599 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a:
4600 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee:
4601 case 0x9d10 ... 0x9d1b:
4602 return true;
4603 }
4604
4605 return false;
4606}
4607
/*
 * On these PCHs the usable ACS control register is not the standard
 * PCI_ACS_CTRL word but a private dword one past the capability register.
 */
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)

/*
 * Report ACS controls for SPT PCH root ports using the non-standard
 * control register location above instead of PCI_ACS_CTRL.
 */
static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = dev->acs_cap;
	if (!pos)
		return -ENOTTY;

	/* Only care about requested flags the hardware can support (plus EC) */
	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
4630
4631static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
4632{
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642 return pci_acs_ctrl_enabled(acs_flags,
4643 PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
4644 PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
4645}
4646
4647static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
4648{
4649
4650
4651
4652
4653
4654 if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
4655 return -ENOTTY;
4656
4657 return pci_acs_ctrl_enabled(acs_flags,
4658 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4659}
4660
4661static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
4662{
4663
4664
4665
4666
4667
4668
4669 return pci_acs_ctrl_enabled(acs_flags,
4670 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4671}
4672
/*
 * Vendor/device table mapping devices to their ACS-reporting quirk.
 * Entries are matched in order by pci_dev_specific_acs_enabled(); a
 * PCI_ANY_ID device entry acts as a vendor-wide fallback.  Group labels
 * below are inferred from the device-ID ranges — NOTE(review): confirm
 * against the vendors' product documentation.
 */
static const struct pci_dev_acs_enabled {
	u16 vendor;		/* PCI vendor ID to match */
	u16 device;		/* PCI device ID (or PCI_ANY_ID) */
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
	/* AMD/ATI southbridge functions */
	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
	/* Solarflare multifunction NICs */
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NICs */
	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
	/* 82580 family */
	{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
	/* 82576 family */
	{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
	/* 82575 family */
	{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
	/* I350 family */
	{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
	/* 82571 quad-port family */
	{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
	/* I219 family */
	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
	/* Intel Root Complex Integrated Endpoints (vendor-wide) */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
	/* QCOM root ports */
	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
	/* HXT root ports (same ACS design as QCOM) */
	{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
	/* Intel PCH root ports (vendor-wide; quirks self-match on device ID) */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
	/* Cavium ThunderX */
	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
	/* APM X-Gene */
	{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
	/* Ampere Computing */
	{ PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
	/* Amazon Annapurna Labs */
	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
	/* Zhaoxin multifunction devices */
	{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
	/* Zhaoxin Root/Downstream Ports */
	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
	{ 0 }	/* terminator */
};
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
/**
 * pci_dev_specific_acs_enabled - check whether device provides ACS controls
 * @dev:  PCI device
 * @acs_flags: bitmask of desired ACS controls
 *
 * Walks pci_dev_acs_enabled[] in order; the first matching quirk that
 * returns a non-negative answer wins (so more-specific entries must
 * precede vendor-wide PCI_ANY_ID entries in the table).
 *
 * Returns:
 *   -ENOTTY: no quirk applies; caller cannot tell from here
 *   0:       device does not provide all the desired controls
 *   >0:      device provides all the controls in @acs_flags
 */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
	const struct pci_dev_acs_enabled *i;
	int ret;

	for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
		if ((i->vendor == dev->vendor ||
		     i->vendor == (u16)PCI_ANY_ID) &&
		    (i->device == dev->device ||
		     i->device == (u16)PCI_ANY_ID)) {
			ret = i->acs_enabled(dev, acs_flags);
			/* Negative means "quirk did not apply"; keep looking */
			if (ret >= 0)
				return ret;
		}
	}

	return -ENOTTY;
}
4814
4815
/* Config register on the LPC bridge (D31:F0) holding the Root Complex
 * Base Address — NOTE(review): field layout per Intel PCH datasheets */
#define INTEL_LPC_RCBA_REG 0xf0
/* Bits 31:14 carry the RCBA address */
#define INTEL_LPC_RCBA_MASK 0xffffc000
/* Bit 0: RCBA enable */
#define INTEL_LPC_RCBA_ENABLE (1 << 0)

/* Backbone Scratch Pad Register (chipset MMIO offset from RCBA) */
#define INTEL_BSPR_REG 0x1104
/* Backbone Peer Non-Posted Disable */
#define INTEL_BSPR_REG_BPNPD (1 << 8)
/* Backbone Peer Posted Disable */
#define INTEL_BSPR_REG_BPPD (1 << 9)

/* Upstream Peer Decode Configuration Register */
#define INTEL_UPDCR_REG 0x1014
/* Bits 5:0: peer decode enables */
#define INTEL_UPDCR_REG_MASK 0x3f
4833
/*
 * Disable chipset-internal peer decodes on Intel PCH so that the root
 * ports cannot do peer-to-peer behind the IOMMU's back.  Returns 0 on
 * success, -EINVAL if the RCBA is disabled, -ENOMEM if its MMIO window
 * cannot be mapped.
 */
static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
{
	u32 rcba, bspr, updcr;
	void __iomem *rcba_mem;

	/*
	 * Read the Root Complex Base Address from the LPC bridge (D31:F0
	 * on the same bus); it must be enabled for the chipset registers
	 * below to be reachable.
	 */
	pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
				  INTEL_LPC_RCBA_REG, &rcba);
	if (!(rcba & INTEL_LPC_RCBA_ENABLE))
		return -EINVAL;

	/* Map enough of the RCBA window to reach UPDCR */
	rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
			   PAGE_ALIGN(INTEL_UPDCR_REG));
	if (!rcba_mem)
		return -ENOMEM;

	/*
	 * If BSPR does not show both peer posted and non-posted requests
	 * already disabled, clear the UPDCR peer decode enables so
	 * backbone peer-to-peer is blocked (the isolation this quirk
	 * relies on).
	 */
	bspr = readl(rcba_mem + INTEL_BSPR_REG);
	bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
	if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
		updcr = readl(rcba_mem + INTEL_UPDCR_REG);
		if (updcr & INTEL_UPDCR_REG_MASK) {
			pci_info(dev, "Disabling UPDCR peer decodes\n");
			updcr &= ~INTEL_UPDCR_REG_MASK;
			writel(updcr, rcba_mem + INTEL_UPDCR_REG);
		}
	}

	iounmap(rcba_mem);
	return 0;
}
4875
4876
/* Miscellaneous Port Configuration register on PCH root ports
 * — NOTE(review): offset/bit per Intel PCH datasheets */
#define INTEL_MPC_REG 0xd8
/* MPC.IRBNCE: Invalid Receive Bus Number Check Enable */
#define INTEL_MPC_REG_IRBNCE (1 << 26)
4880
4881static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
4882{
4883 u32 mpc;
4884
4885
4886
4887
4888
4889
4890
4891 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
4892 if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
4893 pci_info(dev, "Enabling MPC IRBNCE\n");
4894 mpc |= INTEL_MPC_REG_IRBNCE;
4895 pci_write_config_word(dev, INTEL_MPC_REG, mpc);
4896 }
4897}
4898
4899
4900
4901
4902
4903
4904
4905
4906static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
4907{
4908 if (!pci_quirk_intel_pch_acs_match(dev))
4909 return -ENOTTY;
4910
4911 if (pci_quirk_enable_intel_lpc_acs(dev)) {
4912 pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
4913 return 0;
4914 }
4915
4916 pci_quirk_enable_intel_rp_mpc_acs(dev);
4917
4918 dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
4919
4920 pci_info(dev, "Intel PCH root port ACS workaround enabled\n");
4921
4922 return 0;
4923}
4924
/*
 * Enable the hardware-supported ACS controls on an SPT PCH root port via
 * its non-standard control register (INTEL_SPT_ACS_CTRL).
 */
static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = dev->acs_cap;
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	/* Enable each control the capability register says is supported */
	ctrl |= (cap & PCI_ACS_SV);
	ctrl |= (cap & PCI_ACS_RR);
	ctrl |= (cap & PCI_ACS_CR);
	ctrl |= (cap & PCI_ACS_UF);

	/* Translation blocking only for external-facing/untrusted ports */
	if (dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);

	pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");

	return 0;
}
4954
/*
 * Disable ACS redirect (RR/CR/EC) on an SPT PCH root port, again via the
 * non-standard control register.  Used when peer-to-peer between devices
 * under the port is desired.
 */
static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = dev->acs_cap;
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);

	pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");

	return 0;
}
4978
/*
 * Device-specific ACS enable/disable-redirect handlers, matched by
 * vendor/device in table order.  Handlers return -ENOTTY when they do
 * not apply, letting the walk continue to the next entry.
 */
static const struct pci_dev_acs_ops {
	u16 vendor;		/* PCI vendor ID to match */
	u16 device;		/* PCI device ID (or PCI_ANY_ID) */
	int (*enable_acs)(struct pci_dev *dev);
	int (*disable_acs_redir)(struct pci_dev *dev);
} pci_dev_acs_ops[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_pch_acs,
	},
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
	    .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
	},
};
4993
4994int pci_dev_specific_enable_acs(struct pci_dev *dev)
4995{
4996 const struct pci_dev_acs_ops *p;
4997 int i, ret;
4998
4999 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
5000 p = &pci_dev_acs_ops[i];
5001 if ((p->vendor == dev->vendor ||
5002 p->vendor == (u16)PCI_ANY_ID) &&
5003 (p->device == dev->device ||
5004 p->device == (u16)PCI_ANY_ID) &&
5005 p->enable_acs) {
5006 ret = p->enable_acs(dev);
5007 if (ret >= 0)
5008 return ret;
5009 }
5010 }
5011
5012 return -ENOTTY;
5013}
5014
5015int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
5016{
5017 const struct pci_dev_acs_ops *p;
5018 int i, ret;
5019
5020 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
5021 p = &pci_dev_acs_ops[i];
5022 if ((p->vendor == dev->vendor ||
5023 p->vendor == (u16)PCI_ANY_ID) &&
5024 (p->device == dev->device ||
5025 p->device == (u16)PCI_ANY_ID) &&
5026 p->disable_acs_redir) {
5027 ret = p->disable_acs_redir(dev);
5028 if (ret >= 0)
5029 return ret;
5030 }
5031 }
5032
5033 return -ENOTTY;
5034}
5035
5036
5037
5038
5039
5040
5041
5042
5043static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
5044{
5045 int pos, i = 0;
5046 u8 next_cap;
5047 u16 reg16, *cap;
5048 struct pci_cap_saved_state *state;
5049
5050
5051 if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
5052 return;
5053
5054
5055 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
5056 if (!pos)
5057 return;
5058
5059
5060
5061
5062
5063 pci_read_config_byte(pdev, pos + 1, &next_cap);
5064 if (next_cap)
5065 return;
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075 pos = 0x50;
5076 pci_read_config_word(pdev, pos, ®16);
5077 if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
5078 u32 status;
5079#ifndef PCI_EXP_SAVE_REGS
5080#define PCI_EXP_SAVE_REGS 7
5081#endif
5082 int size = PCI_EXP_SAVE_REGS * sizeof(u16);
5083
5084 pdev->pcie_cap = pos;
5085 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16);
5086 pdev->pcie_flags_reg = reg16;
5087 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16);
5088 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
5089
5090 pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
5091 if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
5092 PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
5093 pdev->cfg_size = PCI_CFG_SPACE_SIZE;
5094
5095 if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
5096 return;
5097
5098
5099 state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
5100 if (!state)
5101 return;
5102
5103 state->cap.cap_nr = PCI_CAP_ID_EXP;
5104 state->cap.cap_extended = 0;
5105 state->cap.size = size;
5106 cap = (u16 *)&state->cap.data[0];
5107 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
5108 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
5109 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
5110 pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
5111 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
5112 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
5113 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
5114 hlist_add_head(&state->next, &pdev->saved_cap_space);
5115 }
5116}
5117DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
/*
 * These devices advertise Function Level Reset but it does not work
 * reliably; mark them so the core never attempts FLR.
 */
static void quirk_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
5138
/*
 * These bridges cannot handle PCIe Extended Tags.  Because requests with
 * extended tags may come from any device below the host bridge, disable
 * them for the whole hierarchy, then re-walk the bus so already-configured
 * devices pick up the new setting.
 */
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);

	if (!bridge)
		return;

	bridge->no_ext_tags = 1;
	pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");

	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
5158
#ifdef CONFIG_PCI_ATS
/*
 * Some AMD/ATI GPU configurations have a broken ATS implementation that
 * causes IOMMU faults; disable ATS on them except for the specific
 * revisions/subsystems known to be unaffected.
 */
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
	/* For 0x7312 only rev 0x00, and for 0x7340 only rev 0xc5, are broken */
	if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
	    (pdev->device == 0x7340 && pdev->revision != 0xc5))
		return;

	/* 0x15d8: only specific subsystem IDs at rev 0xcf are broken */
	if (pdev->device == 0x15d8) {
		if (pdev->revision == 0xcf &&
		    pdev->subsystem_vendor == 0xea50 &&
		    (pdev->subsystem_device == 0xce19 ||
		     pdev->subsystem_device == 0xcc10 ||
		     pdev->subsystem_device == 0xcc08))
			goto no_ats;
		else
			return;
	}

	/* All other matched device IDs fall through and lose ATS */
no_ats:
	pci_info(pdev, "disabling ATS\n");
	pdev->ats_cap = 0;
}

/* NOTE(review): device-ID→GPU mapping below inferred; confirm vs AMD docs */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
/* discrete GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
/* Navi10-era dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* Navi14-era dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
/* Raven-era iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif
5198
5199
/* Freescale PCIe does not support MSI in RC mode */
static void quirk_fsl_no_msi(struct pci_dev *pdev)
{
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
		pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
5206
5207
5208
5209
5210
5211
5212
5213
5214
/*
 * Create a runtime-PM device link making function @consumer of this slot
 * depend on function @supplier (e.g. a GPU's HDA audio function on the
 * GPU itself), so the supplier is kept in D0 while the consumer is in
 * use.  The link is created only when the supplier function's class
 * matches @class at @class_shift.
 */
static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
				   unsigned int supplier, unsigned int class,
				   unsigned int class_shift)
{
	struct pci_dev *supplier_pdev;

	/* Only act when called on the consumer function itself */
	if (PCI_FUNC(pdev->devfn) != consumer)
		return;

	supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
	if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
		pci_dev_put(supplier_pdev); /* pci_dev_put(NULL) is a no-op */
		return;
	}

	if (device_link_add(&pdev->dev, &supplier_pdev->dev,
			    DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
		pci_info(pdev, "D0 power state depends on %s\n",
			 pci_name(supplier_pdev));
	else
		pci_err(pdev, "Cannot enforce power dependency on %s\n",
			pci_name(supplier_pdev));

	pm_runtime_allow(&pdev->dev);
	pci_dev_put(supplier_pdev);
}
5243
5244
5245
5246
5247
/*
 * GPU HDA audio controllers live at function 1 of the GPU device
 * (function 0, display class); link them so the GPU powers up with the
 * audio function.
 */
static void quirk_gpu_hda(struct pci_dev *hda)
{
	pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
5258
5259
5260
5261
5262
/*
 * NVIDIA GPUs with an integrated USB controller at function 2: link the
 * USB function to the GPU (function 0) for power dependency.
 */
static void quirk_gpu_usb(struct pci_dev *usb)
{
	pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
5269
5270
5271
5272
5273
5274
5275
/*
 * NVIDIA GPUs with a USB Type-C UCSI controller at function 3, reported
 * with an unknown serial-bus class code; link it to the GPU (function 0)
 * for power dependency.
 */
#define PCI_CLASS_SERIAL_UNKNOWN	0x0c80
static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
{
	pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_UNKNOWN, 8,
			      quirk_gpu_usb_typec_ucsi);
5284
5285
5286
5287
5288
/*
 * Some NVIDIA GPU firmware hides the HDA audio function at boot.  Enable
 * it via the bit at config offset 0x488 and refresh the multifunction
 * flag so the audio function is enumerated.  NOTE(review): 0x488/BIT(25)
 * is an undocumented vendor register; semantics inferred from usage.
 */
static void quirk_nvidia_hda(struct pci_dev *gpu)
{
	u8 hdr_type;
	u32 val;

	/* Older GPUs (below the GeForce 320M ID) lack the HDA function */
	if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
		return;

	/* bit 25 at 0x488 enables the HDA controller */
	pci_read_config_dword(gpu, 0x488, &val);
	if (val & BIT(25))
		return;

	pci_info(gpu, "Enabling HDA controller\n");
	pci_write_config_dword(gpu, 0x488, val | BIT(25));

	/* The GPU becomes multifunction once HDA appears; re-read header */
	pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
	gpu->multifunction = !!(hdr_type & 0x80);
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			       PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
				     PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
/*
 * Probe helper for buses below IDT switches, which erroneously flag ACS
 * Source Validation errors on the completions of the config reads used
 * during enumeration.  Temporarily clear SV on the parent bridge, probe
 * the vendor ID, then restore the previous ACS state.  Returns whether a
 * device was found at @devfn.
 */
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
{
	int pos;
	u16 ctrl = 0;
	bool found;
	struct pci_dev *bridge = bus->self;

	pos = bridge->acs_cap;

	/* Disable ACS SV around the vendor-ID probe, if it was enabled */
	if (pos) {
		pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
		if (ctrl & PCI_ACS_SV)
			pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
					      ctrl & ~PCI_ACS_SV);
	}

	found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);

	/* Write Vendor ID (read-only) to clear any pending error state */
	if (found)
		pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);

	/* Re-enable ACS SV if it was enabled before */
	if (ctrl & PCI_ACS_SV)
		pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);

	return found;
}
5369
5370
5371
5372
5373
5374
5375
5376
/*
 * Microsemi Switchtec NTB forwards DMA between NT partitions using proxy
 * requester IDs.  Read each peer partition's requester-ID table from the
 * switch's NTB control registers and register the devfns as DMA aliases
 * so the IOMMU accepts the proxied traffic.
 */
static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
{
	void __iomem *mmio;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	u64 partition_map;
	u8 partition;
	int pp;

	if (pci_enable_device(pdev)) {
		pci_err(pdev, "Cannot enable Switchtec device\n");
		return;
	}

	mmio = pci_iomap(pdev, 0, 0);
	if (mmio == NULL) {
		pci_disable_device(pdev);
		pci_err(pdev, "Cannot iomap Switchtec device\n");
		return;
	}

	pci_info(pdev, "Setting Switchtec proxy ID aliases\n");

	mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
	mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;

	partition = ioread8(&mmio_ntb->partition_id);

	/* 64-bit endpoint map read as two dwords; drop our own partition */
	partition_map = ioread32(&mmio_ntb->ep_map);
	partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
	partition_map &= ~(1ULL << partition);

	for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
		struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
		u32 table_sz = 0;
		int te;

		if (!(partition_map & (1ULL << pp)))
			continue;

		pci_dbg(pdev, "Processing partition %d\n", pp);

		mmio_peer_ctrl = &mmio_ctrl[pp];

		table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
		if (!table_sz) {
			pci_warn(pdev, "Partition %d table_sz 0\n", pp);
			continue;
		}

		/* Sanity bound on the requester-ID table size */
		if (table_sz > 512) {
			pci_warn(pdev,
				 "Invalid Switchtec partition %d table_sz %d\n",
				 pp, table_sz);
			continue;
		}

		for (te = 0; te < table_sz; te++) {
			u32 rid_entry;
			u8 devfn;

			/* devfn is packed in bits 8:1 of each table entry */
			rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
			devfn = (rid_entry >> 1) & 0xFF;
			pci_dbg(pdev,
				"Aliasing Partition %d Proxy ID %02x.%d\n",
				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
			pci_add_dma_alias(pdev, devfn, 1);
		}
	}

	pci_iounmap(pdev, mmio);
	pci_disable_device(pdev);
}
#define SWITCHTEC_QUIRK(vid) \
	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
5453
/*
 * Switchtec part numbers: 0x85xx are Gen3 parts, 0x4xxx Gen4 parts.  All
 * NTB-capable switches need the proxy-ID DMA alias quirk above.
 */
SWITCHTEC_QUIRK(0x8531);
SWITCHTEC_QUIRK(0x8532);
SWITCHTEC_QUIRK(0x8533);
SWITCHTEC_QUIRK(0x8534);
SWITCHTEC_QUIRK(0x8535);
SWITCHTEC_QUIRK(0x8536);
SWITCHTEC_QUIRK(0x8541);
SWITCHTEC_QUIRK(0x8542);
SWITCHTEC_QUIRK(0x8543);
SWITCHTEC_QUIRK(0x8544);
SWITCHTEC_QUIRK(0x8545);
SWITCHTEC_QUIRK(0x8546);
SWITCHTEC_QUIRK(0x8551);
SWITCHTEC_QUIRK(0x8552);
SWITCHTEC_QUIRK(0x8553);
SWITCHTEC_QUIRK(0x8554);
SWITCHTEC_QUIRK(0x8555);
SWITCHTEC_QUIRK(0x8556);
SWITCHTEC_QUIRK(0x8561);
SWITCHTEC_QUIRK(0x8562);
SWITCHTEC_QUIRK(0x8563);
SWITCHTEC_QUIRK(0x8564);
SWITCHTEC_QUIRK(0x8565);
SWITCHTEC_QUIRK(0x8566);
SWITCHTEC_QUIRK(0x8571);
SWITCHTEC_QUIRK(0x8572);
SWITCHTEC_QUIRK(0x8573);
SWITCHTEC_QUIRK(0x8574);
SWITCHTEC_QUIRK(0x8575);
SWITCHTEC_QUIRK(0x8576);
SWITCHTEC_QUIRK(0x4000);
SWITCHTEC_QUIRK(0x4084);
SWITCHTEC_QUIRK(0x4068);
SWITCHTEC_QUIRK(0x4052);
SWITCHTEC_QUIRK(0x4036);
SWITCHTEC_QUIRK(0x4028);
SWITCHTEC_QUIRK(0x4100);
SWITCHTEC_QUIRK(0x4184);
SWITCHTEC_QUIRK(0x4168);
SWITCHTEC_QUIRK(0x4152);
SWITCHTEC_QUIRK(0x4136);
SWITCHTEC_QUIRK(0x4128);
SWITCHTEC_QUIRK(0x4200);
SWITCHTEC_QUIRK(0x4284);
SWITCHTEC_QUIRK(0x4268);
SWITCHTEC_QUIRK(0x4252);
SWITCHTEC_QUIRK(0x4236);
SWITCHTEC_QUIRK(0x4228);
5502
5503
5504
5505
5506
5507
5508
/*
 * PLX NTB forwards DMA using proxy requester IDs that can carry any
 * devfn; alias the entire devfn space (0..255) to the NTB device so the
 * IOMMU accepts the proxied traffic.
 */
static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
{
	pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");

	pci_add_dma_alias(pdev, 0, 256);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
/*
 * On Lenovo ThinkPad P50 (subsystem 17aa:222e) the EFI driver can leave
 * the NVIDIA GPU initialized, which breaks the kernel driver; if the GPU
 * is detected in that state, reset the bus to bring it back to a clean
 * state.  NOTE(review): MMIO offsets 0x2240c / bit 0x2 are an
 * undocumented firmware-state check; semantics inferred from usage.
 */
static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
{
	void __iomem *map;
	int ret;

	/* Only the P50 variant, and only when a reset method exists */
	if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
	    pdev->subsystem_device != 0x222e ||
	    !pdev->reset_fn)
		return;

	if (pci_enable_device_mem(pdev))
		return;

	/* Map enough of BAR0 to reach the state register at 0x2240c */
	map = pci_iomap(pdev, 0, 0x23000);
	if (!map) {
		pci_err(pdev, "Can't map MMIO space\n");
		goto out_disable;
	}

	/* Firmware-left-running indicator; reset the bus if set */
	if (ioread32(map + 0x2240c) & 0x2) {
		pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
		ret = pci_reset_bus(pdev);
		if (ret < 0)
			pci_err(pdev, "Failed to reset GPU: %d\n", ret);
	}

	iounmap(map);
out_disable:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
			      PCI_CLASS_DISPLAY_VGA, 8,
			      quirk_reset_lenovo_thinkpad_p50_nvgpu);
5575
5576
5577
5578
5579
/*
 * This device cannot signal PME# from D0; mask that capability bit so PM
 * code does not rely on it.
 */
static void pci_fixup_no_d0_pme(struct pci_dev *dev)
{
	pci_info(dev, "PME# does not work under D0, disabling it\n");
	dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
/*
 * These Pericom devices advertise MSI and PME# but implement neither
 * reliably; disable both so the core falls back to INTx and polling.
 */
static void pci_fixup_no_msi_no_pme(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_MSI
	pci_info(dev, "MSI is not implemented on this device, disabling it\n");
	dev->no_msi = 1;
#endif
	pci_info(dev, "PME# is unreliable, disabling it\n");
	dev->pme_support = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_msi_no_pme);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_msi_no_pme);
5608
/*
 * Reclassify vendor 0x1ac1 device 0x089a (presumably a Google Apex
 * accelerator — confirm) from PCI_CLASS_NOT_DEFINED to "other system
 * peripheral".  The fixup only matches when the 24-bit class is 0, so
 * OR-ing preserves a zero upper field and keeps the prog-if byte.
 */
static void apex_pci_fixup_class(struct pci_dev *pdev)
{
	pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
			       PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
5615