// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains work-arounds for many known PCI hardware bugs.
 * Devices present only on certain architectures (host bridges et cetera)
 * should be handled in arch-specific code.
 *
 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 *
 * Init/reset quirks for USB host controllers should be in the USB quirks
 * file, where their drivers can use them.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
#include <linux/switchtec.h>
#include <asm/dma.h>
#include "pci.h"

static ktime_t fixup_debug_start(struct pci_dev *dev,
				 void (*fn)(struct pci_dev *dev))
{
	if (initcall_debug)
		pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));

	return ktime_get();
}

static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
			       void (*fn)(struct pci_dev *dev))
{
	ktime_t delta, rettime;
	unsigned long long duration;

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	if (initcall_debug || duration > 10000)
		pci_info(dev, "%pS took %lld usecs\n", fn, duration);
}

static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
			  struct pci_fixup *end)
{
	ktime_t calltime;

	for (; f < end; f++)
		if ((f->class == (u32) (dev->class >> f->class_shift) ||
		     f->class == (u32) PCI_ANY_ID) &&
		    (f->vendor == dev->vendor ||
		     f->vendor == (u16) PCI_ANY_ID) &&
		    (f->device == dev->device ||
		     f->device == (u16) PCI_ANY_ID)) {
			void (*hook)(struct pci_dev *dev);
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
			hook = offset_to_ptr(&f->hook_offset);
#else
			hook = f->hook;
#endif
			calltime = fixup_debug_start(dev, hook);
			hook(dev);
			fixup_debug_report(dev, calltime, hook);
		}
}

extern struct pci_fixup __start_pci_fixups_early[];
extern struct pci_fixup __end_pci_fixups_early[];
extern struct pci_fixup __start_pci_fixups_header[];
extern struct pci_fixup __end_pci_fixups_header[];
extern struct pci_fixup __start_pci_fixups_final[];
extern struct pci_fixup __end_pci_fixups_final[];
extern struct pci_fixup __start_pci_fixups_enable[];
extern struct pci_fixup __end_pci_fixups_enable[];
extern struct pci_fixup __start_pci_fixups_resume[];
extern struct pci_fixup __end_pci_fixups_resume[];
extern struct pci_fixup __start_pci_fixups_resume_early[];
extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
extern struct pci_fixup __start_pci_fixups_suspend_late[];
extern struct pci_fixup __end_pci_fixups_suspend_late[];

static bool pci_apply_fixup_final_quirks;

void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
	struct pci_fixup *start, *end;

	switch (pass) {
	case pci_fixup_early:
		start = __start_pci_fixups_early;
		end = __end_pci_fixups_early;
		break;

	case pci_fixup_header:
		start = __start_pci_fixups_header;
		end = __end_pci_fixups_header;
		break;

	case pci_fixup_final:
		if (!pci_apply_fixup_final_quirks)
			return;
		start = __start_pci_fixups_final;
		end = __end_pci_fixups_final;
		break;

	case pci_fixup_enable:
		start = __start_pci_fixups_enable;
		end = __end_pci_fixups_enable;
		break;

	case pci_fixup_resume:
		start = __start_pci_fixups_resume;
		end = __end_pci_fixups_resume;
		break;

	case pci_fixup_resume_early:
		start = __start_pci_fixups_resume_early;
		end = __end_pci_fixups_resume_early;
		break;

	case pci_fixup_suspend:
		start = __start_pci_fixups_suspend;
		end = __end_pci_fixups_suspend;
		break;

	case pci_fixup_suspend_late:
		start = __start_pci_fixups_suspend_late;
		end = __end_pci_fixups_suspend_late;
		break;

	default:
		return;
	}
	pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);

static int __init pci_apply_final_quirks(void)
{
	struct pci_dev *dev = NULL;
	u8 cls = 0;
	u8 tmp;

	if (pci_cache_line_size)
		pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);

	pci_apply_fixup_final_quirks = true;
	for_each_pci_dev(dev) {
		pci_fixup_device(pci_fixup_final, dev);
		/*
		 * If arch hasn't set it explicitly yet, use the CLS
		 * value shared by all PCI devices.  If there's a
		 * mismatch, fall back to the default value.
		 */
		if (!pci_cache_line_size) {
			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
			if (!cls)
				cls = tmp;
			if (!tmp || cls == tmp)
				continue;

			pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
				 cls << 2, tmp << 2,
				 pci_dfl_cache_line_size << 2);
			pci_cache_line_size = pci_dfl_cache_line_size;
		}
	}

	if (!pci_cache_line_size) {
		pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
			pci_dfl_cache_line_size << 2);
		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
	}

	return 0;
}
fs_initcall_sync(pci_apply_final_quirks);

/*
 * Decoding should be disabled for a PCI device during BAR sizing to avoid
 * conflict. But doing so may cause problems on host bridge and perhaps other
 * key system devices. For devices that need to have mmio decoding always-on,
 * we need to set the dev->mmio_always_on bit.
 */
static void quirk_mmio_always_on(struct pci_dev *dev)
{
	dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);

/*
 * The Mellanox Tavor device gives false positive parity errors.  Disable
 * parity error reporting.
 */
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
	dev->broken_parity_status = 1;	/* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);

/*
 * Deal with broken BIOSes that neglect to enable passive release,
 * which can cause problems in combination with the 82441FX/PPro MTRRs.
 */
static void quirk_passive_release(struct pci_dev *dev)
{
	struct pci_dev *d = NULL;
	unsigned char dlc;

	/*
	 * We have to make sure a particular bit is set in the PIIX3
	 * ISA bridge, so we have to go out and find it.
	 */
	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
		pci_read_config_byte(d, 0x82, &dlc);
		if (!(dlc & 1<<1)) {
			pci_info(d, "PIIX3: Enabling Passive Release\n");
			dlc |= 1<<1;
			pci_write_config_byte(d, 0x82, dlc);
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);

/*
 * Chipsets where ISA DMA transfers can hang the machine.  Setting
 * isa_dma_bridge_buggy makes the ISA DMA code apply its workarounds.
 */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
	if (!isa_dma_bridge_buggy) {
		isa_dma_bridge_buggy = 1;
		pci_info(dev, "Activating ISA DMA hang workarounds\n");
	}
}

/*
 * It's not totally clear which chipsets are the problematic ones.  We know
 * 82C586 and 82C596 variants are affected.
 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);

/*
 * Intel "TigerPoint" LPC: some BIOSes leave PM1a_STS.BM_STS set, which
 * prevents C-state entry from working properly.  Clear it here.
 */
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
	u32 pmbase;
	u16 pm1a;

	pci_read_config_dword(dev, 0x40, &pmbase);
	pmbase = pmbase & 0xff80;
	pm1a = inw(pmbase);

	if (pm1a & 0x10) {
		pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
		outw(0x10, pmbase);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);

/* Chipsets where PCI->PCI transfers vanish or hang */
static void quirk_nopcipci(struct pci_dev *dev)
{
	if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
		pci_info(dev, "Disabling direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_FAIL;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);

static void quirk_nopciamd(struct pci_dev *dev)
{
	u8 rev;
	pci_read_config_byte(dev, 0x08, &rev);
	if (rev == 0x13) {
		pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
		pci_pci_problems |= PCIAGP_FAIL;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);

/* Triton requires workarounds to be used by the drivers */
static void quirk_triton(struct pci_dev *dev)
{
	if ((pci_pci_problems & PCIPCI_TRITON) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_TRITON;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);

/*
 * VIA Apollo KT133/KM133 PCI latency workaround: under heavy PCI load the
 * VIA southbridge can be starved of bus time.  Adjust the host bridge's
 * PCI arbitration so the southbridge gets regular grants, but only when a
 * known-buggy southbridge revision is actually present.
 */
static void quirk_vialatency(struct pci_dev *dev)
{
	struct pci_dev *p;
	u8 busarb;

	/*
	 * We have a potential problem chipset here; now see whether a buggy
	 * southbridge revision is actually present.
	 */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
	if (p != NULL) {

		/* Only revisions 0x40..0x42 of the VT82C686 are affected */
		if (p->revision < 0x40 || p->revision > 0x42)
			goto exit;
	} else {
		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
		if (p == NULL)	/* No problem parts */
			goto exit;

		/* Only revisions 0x10..0x12 of the VT8231 are affected */
		if (p->revision < 0x10 || p->revision > 0x12)
			goto exit;
	}

	/*
	 * Adjust the bus arbitration register (0x76): clear bit 5 and set
	 * bit 4 so that master priority rotation occurs on every PCI master
	 * grant, giving the southbridge regular access to the bus.
	 */
	pci_read_config_byte(dev, 0x76, &busarb);

	busarb &= ~(1<<5);
	busarb |= (1<<4);
	pci_write_config_byte(dev, 0x76, busarb);
	pci_info(dev, "Applying VIA southbridge workaround\n");
exit:
	pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);

/* VIA Apollo VP3 needs ETBF on BT848/878 */
static void quirk_viaetbf(struct pci_dev *dev)
{
	if ((pci_pci_problems & PCIPCI_VIAETBF) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_VIAETBF;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);

static void quirk_vsfx(struct pci_dev *dev)
{
	if ((pci_pci_problems & PCIPCI_VSFX) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_VSFX;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);

/*
 * ALi Magik requires workarounds to be used by the drivers that DMA to AGP
 * space.  Latency must be set to 0xA and the Triton workaround applied too.
 */
static void quirk_alimagik(struct pci_dev *dev)
{
	if ((pci_pci_problems & PCIPCI_ALIMAGIK) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);

/* Natoma has some interesting boundary conditions with Zoran stuff at least */
static void quirk_natoma(struct pci_dev *dev)
{
	if ((pci_pci_problems & PCIPCI_NATOMA) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_NATOMA;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);

/*
 * This chip can cause PCI parity errors if config register 0xA0 is read:
 * limit the accessible config space accordingly.
 */
static void quirk_citrine(struct pci_dev *dev)
{
	dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);

/*
 * This chip can cause bus lockups if config addresses above 0x600 are read
 * or written.
 */
static void quirk_nfp6000(struct pci_dev *dev)
{
	dev->cfg_size = 0x600;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);

/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
			r->end = PAGE_SIZE - 1;
			r->start = 0;
			r->flags |= IORESOURCE_UNSET;
			pci_info(dev, "expanded BAR %d to page size: %pR\n",
				 i, r);
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);

/*
 * S3 868 and 968 chips report region size equal to 32M, but they decode
 * 64M.  If it's needed, re-allocate the region.
 */
static void quirk_s3_64M(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];

	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
		r->flags |= IORESOURCE_UNSET;
		r->start = 0;
		r->end = 0x3ffffff;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);

static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
		     const char *name)
{
	u32 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + pos;

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);

	if (!region)
		return;

	res->name = pci_name(dev);
	res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
	res->flags |=
		(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
	region &= ~(size - 1);

	/* Convert from PCI bus to resource space */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
		 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
}

/*
 * Some CS5536 BIOSes (e.g. the Soekris NET5501 comBIOS) report the wrong
 * sizes for the ISA bridge's I/O BARs.  BAR0 should be 8 bytes; if it is
 * not, re-derive BARs 0-2 from the raw base address registers with their
 * known sizes.
 */
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
	static char *name = "CS5536 ISA bridge";

	if (pci_resource_len(dev, 0) != 8) {
		quirk_io(dev, 0, 8, name);	/* SMB */
		quirk_io(dev, 1, 256, name);	/* GPIO */
		quirk_io(dev, 2, 64, name);	/* MFGPT */
		pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
			 name);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);

static void quirk_io_region(struct pci_dev *dev, int port,
			    unsigned size, int nr, const char *name)
{
	u16 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + nr;

	pci_read_config_word(dev, port, &region);
	region &= ~(size - 1);

	if (!region)
		return;

	res->name = pci_name(dev);
	res->flags = IORESOURCE_IO;

	/* Convert from PCI bus to resource space */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	if (!pci_claim_resource(dev, nr))
		pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
}

/*
 * The ATI northbridge will machine-check the processor if anything touches
 * the region 0x3b0-0x3bb or reads 0x3d3.  Reserve those ports so nothing
 * else ever does.
 */
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
	pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	request_region(0x3b0, 0x0C, "RadeonIGP");
	request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
595
596
597
598
599
600
601
602
603
604
605
606
607static void quirk_amd_nl_class(struct pci_dev *pdev)
608{
609 u32 class = pdev->class;
610
611
612 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
613 pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
614 class, pdev->class);
615}
616DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
617 quirk_amd_nl_class);
618
619
620
621
622
623
624
625
626static void quirk_synopsys_haps(struct pci_dev *pdev)
627{
628 u32 class = pdev->class;
629
630 switch (pdev->device) {
631 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
632 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
633 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
634 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
635 pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
636 class, pdev->class);
637 break;
638 }
639}
640DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
641 PCI_CLASS_SERIAL_USB_XHCI, 0,
642 quirk_synopsys_haps);
643
644
645
646
647
648
649
650
651
652
653
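/*
 * ALi M7101: reserve the ACPI and SMBus I/O regions it decodes (bases at
 * config offsets 0xE0 and 0xE2) so they are not handed out to other devices.
 */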
654static void quirk_ali7101_acpi(struct pci_dev *dev)
655{
656 quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
657 quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
658}
659DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
660
661static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
662{
663 u32 devres;
664 u32 mask, size, base;
665
666 pci_read_config_dword(dev, port, &devres);
667 if ((devres & enable) != enable)
668 return;
669 mask = (devres >> 16) & 15;
670 base = devres & 0xffff;
671 size = 16;
672 for (;;) {
673 unsigned bit = size >> 1;
674 if ((bit & mask) == bit)
675 break;
676 size = bit;
677 }
678
679
680
681
682
683 base &= -size;
684 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
685}
686
687static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
688{
689 u32 devres;
690 u32 mask, size, base;
691
692 pci_read_config_dword(dev, port, &devres);
693 if ((devres & enable) != enable)
694 return;
695 base = devres & 0xffff0000;
696 mask = (devres & 0x3f) << 16;
697 size = 128 << 16;
698 for (;;) {
699 unsigned bit = size >> 1;
700 if ((bit & mask) == bit)
701 break;
702 size = bit;
703 }
704
705
706
707
708
709 base &= -size;
710 pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
711}
712
713
714
715
716
717
718
719static void quirk_piix4_acpi(struct pci_dev *dev)
720{
721 u32 res_a;
722
723 quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
724 quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
725
726
727 pci_read_config_dword(dev, 0x5c, &res_a);
728
729 piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
730 piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
731
732
733
734
735 if (res_a & (1 << 29)) {
736 piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
737 piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
738 }
739
740 if (res_a & (1 << 30)) {
741 piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
742 piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
743 }
744 piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
745 piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
746}
747DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
748DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
749
750#define ICH_PMBASE 0x40
751#define ICH_ACPI_CNTL 0x44
752#define ICH4_ACPI_EN 0x10
753#define ICH6_ACPI_EN 0x80
754#define ICH4_GPIOBASE 0x58
755#define ICH4_GPIO_CNTL 0x5c
756#define ICH4_GPIO_EN 0x10
757#define ICH6_GPIOBASE 0x48
758#define ICH6_GPIO_CNTL 0x4c
759#define ICH6_GPIO_EN 0x10
760
761
762
763
764
765
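/*
 * ICH4 (and related ICH-family) LPC bridges: if the ACPI or GPIO decode is
 * enabled, reserve the I/O regions at PMBASE (128 bytes, ACPI/GPIO/TCO) and
 * GPIOBASE (64 bytes) so they cannot be reassigned to other devices.
 */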
766static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
767{
768 u8 enable;
769
770
771
772
773
774
775
776
777 pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
778 if (enable & ICH4_ACPI_EN)
779 quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
780 "ICH4 ACPI/GPIO/TCO");
781
782 pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
783 if (enable & ICH4_GPIO_EN)
784 quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
785 "ICH4 GPIO");
786}
787DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
788DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
789DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
790DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
791DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
792DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
793DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
794DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
795DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
796DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
797
798static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
799{
800 u8 enable;
801
802 pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
803 if (enable & ICH6_ACPI_EN)
804 quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
805 "ICH6 ACPI/GPIO/TCO");
806
807 pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
808 if (enable & ICH6_GPIO_EN)
809 quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
810 "ICH6 GPIO");
811}
812
813static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
814 const char *name, int dynsize)
815{
816 u32 val;
817 u32 size, base;
818
819 pci_read_config_dword(dev, reg, &val);
820
821
822 if (!(val & 1))
823 return;
824 base = val & 0xfffc;
825 if (dynsize) {
826
827
828
829
830
831
832 size = 16;
833 } else {
834 size = 128;
835 }
836 base &= ~(size-1);
837
838
839
840
841
842 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
843}
844
845static void quirk_ich6_lpc(struct pci_dev *dev)
846{
847
848 ich6_lpc_acpi_gpio(dev);
849
850
851 ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
852 ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
853}
854DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
855DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
856
857static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
858 const char *name)
859{
860 u32 val;
861 u32 mask, base;
862
863 pci_read_config_dword(dev, reg, &val);
864
865
866 if (!(val & 1))
867 return;
868
869
870 base = val & 0xfffc;
871 mask = (val >> 16) & 0xfc;
872 mask |= 3;
873
874
875
876
877
878 pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
879}
880
881
882static void quirk_ich7_lpc(struct pci_dev *dev)
883{
884
885 ich6_lpc_acpi_gpio(dev);
886
887
888 ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
889 ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
890 ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
891 ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
892}
893DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
894DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
895DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
896DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
897DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
898DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
899DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
900DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
901DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
902DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
903DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
904DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
905DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
906
907
908
909
910
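/*
 * VIA VT82C586 ACPI: on revisions with bit 4 set, reserve the 256-byte ACPI
 * I/O region whose base is read from config offset 0x48.
 */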
911static void quirk_vt82c586_acpi(struct pci_dev *dev)
912{
913 if (dev->revision & 0x10)
914 quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
915 "vt82c586 ACPI");
916}
917DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
918
919
920
921
922
923
924
925static void quirk_vt82c686_acpi(struct pci_dev *dev)
926{
927 quirk_vt82c586_acpi(dev);
928
929 quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
930 "vt82c686 HW-mon");
931
932 quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
933}
934DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
935
936
937
938
939
940
941static void quirk_vt8235_acpi(struct pci_dev *dev)
942{
943 quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
944 quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
945}
946DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
947
948
949
950
951
952static void quirk_xio2000a(struct pci_dev *dev)
953{
954 struct pci_dev *pdev;
955 u16 command;
956
957 pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
958 list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
959 pci_read_config_word(pdev, PCI_COMMAND, &command);
960 if (command & PCI_COMMAND_FAST_BACK)
961 pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
962 }
963}
964DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
965 quirk_xio2000a);
966
967#ifdef CONFIG_X86_IO_APIC
968
969#include <asm/io_apic.h>
970
971
972
973
974
975
976
977
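/*
 * VIA 686A/B: when an IO-APIC is present, on-chip devices must be routed to
 * the external APIC; otherwise external APIC routing is disabled.  Register
 * 0x58 selects the routing (0x1f = route all, 0 = disable).
 */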
978static void quirk_via_ioapic(struct pci_dev *dev)
979{
980 u8 tmp;
981
982 if (nr_ioapics < 1)
983 tmp = 0;
984 else
985 tmp = 0x1f;
986
987 pci_info(dev, "%sbling VIA external APIC routing\n",
988 tmp == 0 ? "Disa" : "Ena");
989
990
991 pci_write_config_byte(dev, 0x58, tmp);
992}
993DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
994DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
995
996
997
998
999
1000
1001
1002static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
1003{
1004 u8 misc_control2;
1005#define BYPASS_APIC_DEASSERT 8
1006
1007 pci_read_config_byte(dev, 0x5B, &misc_control2);
1008 if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
1009 pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
1010 pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
1011 }
1012}
1013DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
1014DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025static void quirk_amd_ioapic(struct pci_dev *dev)
1026{
1027 if (dev->revision >= 0x02) {
1028 pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
1029 pci_warn(dev, " : booting with the \"noapic\" option\n");
1030 }
1031}
1032DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
1033#endif
1034
1035#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
1036
1037static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
1038{
1039
1040 if (dev->subsystem_device == 0xa118)
1041 dev->sriov->link = dev->devfn;
1042}
1043DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
1044#endif
1045
1046
1047
1048
1049
1050static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
1051{
1052 if (dev->subordinate && dev->revision <= 0x12) {
1053 pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
1054 dev->revision);
1055 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
1056 }
1057}
1058DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
1059
1060
1061
1062
1063
1064
1065
1066
1067static void quirk_via_acpi(struct pci_dev *d)
1068{
1069 u8 irq;
1070
1071
1072 pci_read_config_byte(d, 0x42, &irq);
1073 irq &= 0xf;
1074 if (irq && (irq != 2))
1075 d->irq = irq;
1076}
1077DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
1078DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
1079
1080
1081static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
1082
1083static void quirk_via_bridge(struct pci_dev *dev)
1084{
1085
1086 switch (dev->device) {
1087 case PCI_DEVICE_ID_VIA_82C686:
1088
1089
1090
1091
1092
1093 via_vlink_dev_lo = PCI_SLOT(dev->devfn);
1094 via_vlink_dev_hi = PCI_SLOT(dev->devfn);
1095 break;
1096 case PCI_DEVICE_ID_VIA_8237:
1097 case PCI_DEVICE_ID_VIA_8237A:
1098 via_vlink_dev_lo = 15;
1099 break;
1100 case PCI_DEVICE_ID_VIA_8235:
1101 via_vlink_dev_lo = 16;
1102 break;
1103 case PCI_DEVICE_ID_VIA_8231:
1104 case PCI_DEVICE_ID_VIA_8233_0:
1105 case PCI_DEVICE_ID_VIA_8233A:
1106 case PCI_DEVICE_ID_VIA_8233C_0:
1107 via_vlink_dev_lo = 17;
1108 break;
1109 }
1110}
1111DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
1112DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
1113DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
1114DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
1115DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
1116DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
1117DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
1118DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
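/*
 * quirk_via_vlink - VIA VLink IRQ number update
 *
 * For on-board devices behind a detected VIA south bridge (see
 * quirk_via_bridge above), make sure the PCI_INTERRUPT_LINE register matches
 * the IRQ the kernel assigned, so legacy interrupts are delivered to the
 * right place.
 */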
1131static void quirk_via_vlink(struct pci_dev *dev)
1132{
1133 u8 irq, new_irq;
1134
1135
1136 if (via_vlink_dev_lo == -1)
1137 return;
1138
1139 new_irq = dev->irq;
1140
1141
1142 if (!new_irq || new_irq > 15)
1143 return;
1144
1145
1146 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
1147 PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
1148 return;
1149
1150
1151
1152
1153
1154 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1155 if (new_irq != irq) {
1156 pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
1157 irq, new_irq);
1158 udelay(15);
1159 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
1160 }
1161}
1162DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
1163
1164
1165
1166
1167
1168
1169static void quirk_vt82c598_id(struct pci_dev *dev)
1170{
1171 pci_write_config_byte(dev, 0xfc, 0);
1172 pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
1173}
1174DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
1175
1176
1177
1178
1179
1180
1181
1182static void quirk_cardbus_legacy(struct pci_dev *dev)
1183{
1184 pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
1185}
1186DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1187 PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
1188DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
1189 PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
1190
1191
1192
1193
1194
1195
1196
1197
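/*
 * AMD-751 (FE Gate 700C) host bridge: some BIOSes leave the standards
 * compliance bits in config registers 0x4C/0x84 disabled; enable them here
 * and again on early resume.
 */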
1198static void quirk_amd_ordering(struct pci_dev *dev)
1199{
1200 u32 pcic;
1201 pci_read_config_dword(dev, 0x4C, &pcic);
1202 if ((pcic & 6) != 6) {
1203 pcic |= 6;
1204 pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
1205 pci_write_config_dword(dev, 0x4C, pcic);
1206 pci_read_config_dword(dev, 0x84, &pcic);
1207 pcic |= (1 << 23);
1208 pci_write_config_dword(dev, 0x84, pcic);
1209 }
1210}
1211DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1212DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1213
1214
1215
1216
1217
1218
1219
1220
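/*
 * Dunord I-3000: the card decodes and responds to addresses not apparently
 * assigned to it.  Force a larger allocation (BAR 1 grown to 16MB) so
 * nothing gets placed too close to it.
 */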
1221static void quirk_dunord(struct pci_dev *dev)
1222{
1223 struct resource *r = &dev->resource[1];
1224
1225 r->flags |= IORESOURCE_UNSET;
1226 r->start = 0;
1227 r->end = 0xffffff;
1228}
1229DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1230
1231
1232
1233
1234
1235
1236static void quirk_transparent_bridge(struct pci_dev *dev)
1237{
1238 dev->transparent = 1;
1239}
1240DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
1241DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1242
1243
1244
1245
1246
1247
1248
static void quirk_mediagx_master(struct pci_dev *dev)
{
	u8 reg;

	pci_read_config_byte(dev, 0x41, &reg);
	if (reg & 2) {
		reg &= ~2;
		pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
			 reg);
		pci_write_config_byte(dev, 0x41, reg);
	}
}
1261DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1262DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1263
1264
1265
1266
1267
1268
1269static void quirk_disable_pxb(struct pci_dev *pdev)
1270{
1271 u16 config;
1272
1273 if (pdev->revision != 0x04)
1274 return;
1275 pci_read_config_word(pdev, 0x40, &config);
1276 if (config & (1<<6)) {
1277 config &= ~(1<<6);
1278 pci_write_config_word(pdev, 0x40, config);
1279 pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
1280 }
1281}
1282DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1283DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1284
1285static void quirk_amd_ide_mode(struct pci_dev *pdev)
1286{
1287
1288 u8 tmp;
1289
1290 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
1291 if (tmp == 0x01) {
1292 pci_read_config_byte(pdev, 0x40, &tmp);
1293 pci_write_config_byte(pdev, 0x40, tmp|1);
1294 pci_write_config_byte(pdev, 0x9, 1);
1295 pci_write_config_byte(pdev, 0xa, 6);
1296 pci_write_config_byte(pdev, 0x40, tmp);
1297
1298 pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
1299 pci_info(pdev, "set SATA to AHCI mode\n");
1300 }
1301}
1302DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
1303DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
1304DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1305DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1306DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
1307DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
1308DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1309DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1310
1311
1312static void quirk_svwks_csb5ide(struct pci_dev *pdev)
1313{
1314 u8 prog;
1315 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1316 if (prog & 5) {
1317 prog &= ~5;
1318 pdev->class &= ~5;
1319 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1320
1321 }
1322}
1323DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1324
1325
1326static void quirk_ide_samemode(struct pci_dev *pdev)
1327{
1328 u8 prog;
1329
1330 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1331
1332 if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
1333 pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
1334 prog &= ~5;
1335 pdev->class &= ~5;
1336 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1337 }
1338}
1339DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1340
1341
1342static void quirk_no_ata_d3(struct pci_dev *pdev)
1343{
1344 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
1345}
1346
1347DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
1348 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1349DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
1350 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1351
1352DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
1353 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1354
1355
1356DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
1357 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1358
1359
1360
1361
1362
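/*
 * The Intel 82375 PCI/EISA bridge appears as non-classified; fix its class
 * code so it is handled as the EISA bridge it really is.
 */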
1363static void quirk_eisa_bridge(struct pci_dev *dev)
1364{
1365 dev->class = PCI_CLASS_BRIDGE_EISA << 8;
1366}
1367DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
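/*
 * On a number of ASUS (and some HP, Samsung and Compaq) boards the BIOS
 * hides the Intel SMBus controller behind a bit in the LPC bridge.  The
 * host bridge quirk below records the affected subsystem IDs, and the LPC
 * quirks that follow unhide the SMBus device again.
 */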
1394static int asus_hides_smbus;
1395
1396static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
1397{
1398 if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1399 if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
1400 switch (dev->subsystem_device) {
1401 case 0x8025:
1402 case 0x8070:
1403 case 0x8088:
1404 case 0x1626:
1405 asus_hides_smbus = 1;
1406 }
1407 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
1408 switch (dev->subsystem_device) {
1409 case 0x80b1:
1410 case 0x80b2:
1411 case 0x8093:
1412 asus_hides_smbus = 1;
1413 }
1414 else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
1415 switch (dev->subsystem_device) {
1416 case 0x8030:
1417 asus_hides_smbus = 1;
1418 }
1419 else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
1420 switch (dev->subsystem_device) {
1421 case 0x8070:
1422 asus_hides_smbus = 1;
1423 }
1424 else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
1425 switch (dev->subsystem_device) {
1426 case 0x80c9:
1427 asus_hides_smbus = 1;
1428 }
1429 else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
1430 switch (dev->subsystem_device) {
1431 case 0x1751:
1432 case 0x1821:
1433 case 0x1897:
1434 asus_hides_smbus = 1;
1435 }
1436 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1437 switch (dev->subsystem_device) {
1438 case 0x184b:
1439 case 0x186a:
1440 asus_hides_smbus = 1;
1441 }
1442 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1443 switch (dev->subsystem_device) {
1444 case 0x80f2:
1445 asus_hides_smbus = 1;
1446 }
1447 else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1448 switch (dev->subsystem_device) {
1449 case 0x1882:
1450 case 0x1977:
1451 asus_hides_smbus = 1;
1452 }
1453 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
1454 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1455 switch (dev->subsystem_device) {
1456 case 0x088C:
1457 case 0x0890:
1458 asus_hides_smbus = 1;
1459 }
1460 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1461 switch (dev->subsystem_device) {
1462 case 0x12bc:
1463 case 0x12bd:
1464 case 0x006a:
1465 asus_hides_smbus = 1;
1466 }
1467 else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
1468 switch (dev->subsystem_device) {
1469 case 0x12bf:
1470 asus_hides_smbus = 1;
1471 }
1472 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
1473 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1474 switch (dev->subsystem_device) {
1475 case 0xC00C:
1476 asus_hides_smbus = 1;
1477 }
1478 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
1479 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1480 switch (dev->subsystem_device) {
1481 case 0x0058:
1482 asus_hides_smbus = 1;
1483 }
1484 else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
1485 switch (dev->subsystem_device) {
1486 case 0xB16C:
1487
1488
1489
1490 asus_hides_smbus = 1;
1491 }
1492 else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
1493 switch (dev->subsystem_device) {
1494 case 0x00b8:
1495 case 0x00b9:
1496 case 0x00ba:
1497
1498
1499
1500
1501
1502 asus_hides_smbus = 1;
1503 }
1504 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
1505 switch (dev->subsystem_device) {
1506 case 0x001A:
1507
1508
1509
1510 asus_hides_smbus = 1;
1511 }
1512 }
1513}
1514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
1515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
1516DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
1517DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
1518DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
1520DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
1521DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
1522DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
1523DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1524
1525DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
1526DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
1527DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1528
1529static void asus_hides_smbus_lpc(struct pci_dev *dev)
1530{
1531 u16 val;
1532
1533 if (likely(!asus_hides_smbus))
1534 return;
1535
1536 pci_read_config_word(dev, 0xF2, &val);
1537 if (val & 0x8) {
1538 pci_write_config_word(dev, 0xF2, val & (~0x8));
1539 pci_read_config_word(dev, 0xF2, &val);
1540 if (val & 0x8)
1541 pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1542 val);
1543 else
1544 pci_info(dev, "Enabled i801 SMBus device\n");
1545 }
1546}
1547DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1548DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1549DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1550DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1551DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1552DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1553DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1554DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1555DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1556DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1557DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1558DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1559DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1560DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1561
1562
1563static void __iomem *asus_rcba_base;
1564static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
1565{
1566 u32 rcba;
1567
1568 if (likely(!asus_hides_smbus))
1569 return;
1570 WARN_ON(asus_rcba_base);
1571
1572 pci_read_config_dword(dev, 0xF0, &rcba);
1573
1574 asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
1575 if (asus_rcba_base == NULL)
1576 return;
1577}
1578
1579static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
1580{
1581 u32 val;
1582
1583 if (likely(!asus_hides_smbus || !asus_rcba_base))
1584 return;
1585
1586
1587 val = readl(asus_rcba_base + 0x3418);
1588
1589
1590 writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
1591}
1592
1593static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1594{
1595 if (likely(!asus_hides_smbus || !asus_rcba_base))
1596 return;
1597
1598 iounmap(asus_rcba_base);
1599 asus_rcba_base = NULL;
1600 pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
1601}
1602
1603static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1604{
1605 asus_hides_smbus_lpc_ich6_suspend(dev);
1606 asus_hides_smbus_lpc_ich6_resume_early(dev);
1607 asus_hides_smbus_lpc_ich6_resume(dev);
1608}
1609DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
1610DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
1611DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
1612DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1613
1614
1615static void quirk_sis_96x_smbus(struct pci_dev *dev)
1616{
1617 u8 val = 0;
1618 pci_read_config_byte(dev, 0x77, &val);
1619 if (val & 0x10) {
1620 pci_info(dev, "Enabling SiS 96x SMBus\n");
1621 pci_write_config_byte(dev, 0x77, val & ~0x10);
1622 }
1623}
1624DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1625DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1626DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1627DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1628DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1629DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1630DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1631DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641#define SIS_DETECT_REGISTER 0x40
1642
static void quirk_sis_503(struct pci_dev *dev)
{
	u8 reg;
	u16 devid;

	pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
	pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
	pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
	if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
		pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
		return;
	}

	/*
	 * Ok, it now shows up as a 96x.  Run the 96x quirk by hand in case
	 * it has already been processed.  (Depends on link order, which is
	 * apparently not guaranteed.)
	 */
	dev->device = devid;
	quirk_sis_96x_smbus(dev);
}
1664DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1665DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1666
1667
1668
1669
1670
1671
1672
1673static void asus_hides_ac97_lpc(struct pci_dev *dev)
1674{
1675 u8 val;
1676 int asus_hides_ac97 = 0;
1677
1678 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1679 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1680 asus_hides_ac97 = 1;
1681 }
1682
1683 if (!asus_hides_ac97)
1684 return;
1685
1686 pci_read_config_byte(dev, 0x50, &val);
1687 if (val & 0xc0) {
1688 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1689 pci_read_config_byte(dev, 0x50, &val);
1690 if (val & 0xc0)
1691 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1692 val);
1693 else
1694 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1695 }
1696}
1697DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1698DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1699
1700#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1701
1702
1703
1704
1705
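/*
 * JMicron AHCI/IDE combo controllers: program the function-mapping bits in
 * config registers 0x40/0x80 so both the AHCI and IDE functions show up as
 * expected, then refresh the cached header type and class code.  Only
 * function 0 carries these shared registers.
 */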
1706static void quirk_jmicron_ata(struct pci_dev *pdev)
1707{
1708 u32 conf1, conf5, class;
1709 u8 hdr;
1710
1711
1712 if (PCI_FUNC(pdev->devfn))
1713 return;
1714
1715 pci_read_config_dword(pdev, 0x40, &conf1);
1716 pci_read_config_dword(pdev, 0x80, &conf5);
1717
1718 conf1 &= ~0x00CFF302;
1719 conf5 &= ~(1 << 24);
1720
1721 switch (pdev->device) {
1722 case PCI_DEVICE_ID_JMICRON_JMB360:
1723 case PCI_DEVICE_ID_JMICRON_JMB362:
1724 case PCI_DEVICE_ID_JMICRON_JMB364:
1725
1726 conf1 |= 0x0002A100;
1727 break;
1728
1729 case PCI_DEVICE_ID_JMICRON_JMB365:
1730 case PCI_DEVICE_ID_JMICRON_JMB366:
1731
1732 conf5 |= (1 << 24);
1733 fallthrough;
1734 case PCI_DEVICE_ID_JMICRON_JMB361:
1735 case PCI_DEVICE_ID_JMICRON_JMB363:
1736 case PCI_DEVICE_ID_JMICRON_JMB369:
1737
1738
1739 conf1 |= 0x00C2A1B3;
1740 break;
1741
1742 case PCI_DEVICE_ID_JMICRON_JMB368:
1743
1744 conf1 |= 0x00C00000;
1745 break;
1746 }
1747
1748 pci_write_config_dword(pdev, 0x40, conf1);
1749 pci_write_config_dword(pdev, 0x80, conf5);
1750
1751
1752 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
1753 pdev->hdr_type = hdr & 0x7f;
1754 pdev->multifunction = !!(hdr & 0x80);
1755
1756 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
1757 pdev->class = class >> 8;
1758}
1759DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1760DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1761DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
1762DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1763DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
1764DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1765DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1766DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1767DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
1768DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1769DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1770DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
1771DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1772DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
1773DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1774DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1775DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1776DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
1777
1778#endif
1779
1780static void quirk_jmicron_async_suspend(struct pci_dev *dev)
1781{
1782 if (dev->multifunction) {
1783 device_disable_async_suspend(&dev->dev);
1784 pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
1785 }
1786}
1787DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
1788DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
1789DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
1790DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
1791
1792#ifdef CONFIG_X86_IO_APIC
1793static void quirk_alder_ioapic(struct pci_dev *pdev)
1794{
1795 int i;
1796
1797 if ((pdev->class >> 8) != 0xff00)
1798 return;
1799
1800
1801
1802
1803
1804
1805 if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
1806 insert_resource(&iomem_resource, &pdev->resource[0]);
1807
1808
1809
1810
1811
1812 for (i = 1; i < PCI_STD_NUM_BARS; i++)
1813 memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
1814}
1815DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
1816#endif
1817
1818static void quirk_pcie_mch(struct pci_dev *pdev)
1819{
1820 pdev->no_msi = 1;
1821}
1822DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
1823DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
1824DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
1825
1826DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1827
1828
1829
1830
1831
1832static void quirk_pcie_pxh(struct pci_dev *dev)
1833{
1834 dev->no_msi = 1;
1835 pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
1836}
1837DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
1838DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
1839DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
1840DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
1841DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
1842
1843
1844
1845
1846
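/*
 * Some Intel PCI Express chipset ports have trouble with device power
 * management: disable D1/D2 support and stretch the D3hot delay to 120 ms.
 */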
1847static void quirk_intel_pcie_pm(struct pci_dev *dev)
1848{
1849 pci_pm_d3hot_delay = 120;
1850 dev->no_d1d2 = 1;
1851}
1852DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
1853DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
1854DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
1855DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
1856DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
1857DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
1858DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
1859DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
1860DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
1861DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
1862DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
1863DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
1864DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
1865DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
1866DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
1867DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
1868DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
1869DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
1870DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
1871DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
1872DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1873
1874static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
1875{
1876 if (dev->d3hot_delay >= delay)
1877 return;
1878
1879 dev->d3hot_delay = delay;
1880 pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
1881 dev->d3hot_delay);
1882}
1883
1884static void quirk_radeon_pm(struct pci_dev *dev)
1885{
1886 if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1887 dev->subsystem_device == 0x00e2)
1888 quirk_d3hot_delay(dev, 20);
1889}
1890DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
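/*
 * AMD Ryzen XHCI controllers (0x15e0/0x15e1) need extra time (20 ms) after
 * leaving D3hot before they are usable again.
 */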
1901static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
1902{
1903 quirk_d3hot_delay(dev, 20);
1904}
1905DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
1906DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
1907
1908#ifdef CONFIG_X86_IO_APIC
1909static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
1910{
1911 noioapicreroute = 1;
1912 pr_info("%s detected: disable boot interrupt reroute\n", d->ident);
1913
1914 return 0;
1915}
1916
1917static const struct dmi_system_id boot_interrupt_dmi_table[] = {
1918
1919
1920
1921 {
1922 .callback = dmi_disable_ioapicreroute,
1923 .ident = "ASUSTek Computer INC. M2N-LR",
1924 .matches = {
1925 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
1926 DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
1927 },
1928 },
1929 {}
1930};
1931
1932
1933
1934
1935
1936
1937
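/*
 * Boot interrupts on these chipsets apparently cannot be turned off at the
 * source, so remap the device interrupt onto the boot interrupt line
 * instead; boards listed in the DMI table above opt out of this rerouting.
 */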
1938static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
1939{
1940 dmi_check_system(boot_interrupt_dmi_table);
1941 if (noioapicquirk || noioapicreroute)
1942 return;
1943
1944 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
1945 pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
1946 dev->vendor, dev->device);
1947}
1948DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
1949DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
1950DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
1951DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
1952DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
1953DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
1954DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
1955DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1956DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
1957DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
1958DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
1959DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
1960DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
1961DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
1962DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
1963DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
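/*
 * On chipsets that allow it, disable the generation of legacy INTx boot
 * interrupts at the source.  The register layouts below cover the Intel
 * 6300ESB IO-APIC and the CIPINTRC register used by the other Intel
 * devices handled in quirk_disable_intel_boot_interrupt().
 */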
1984#define INTEL_6300_IOAPIC_ABAR 0x40
1985#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
1986
1987#define INTEL_CIPINTRC_CFG_OFFSET 0x14C
1988#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)
1989
1990static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
1991{
1992 u16 pci_config_word;
1993 u32 pci_config_dword;
1994
1995 if (noioapicquirk)
1996 return;
1997
1998 switch (dev->device) {
1999 case PCI_DEVICE_ID_INTEL_ESB_10:
2000 pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
2001 &pci_config_word);
2002 pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
2003 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
2004 pci_config_word);
2005 break;
2006 case 0x3c28:
2007 case 0x0e28:
2008 case 0x2f28:
2009 case 0x6f28:
2010 case 0x2034:
2011 pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
2012 &pci_config_dword);
2013 pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
2014 pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
2015 pci_config_dword);
2016 break;
2017 default:
2018 return;
2019 }
2020 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2021 dev->vendor, dev->device);
2022}
2023
2024
2025
2026
2027DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
2028 quirk_disable_intel_boot_interrupt);
2029DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
2030 quirk_disable_intel_boot_interrupt);
2031
2032
2033
2034
2035
2036
2037
2038
2039DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
2040 quirk_disable_intel_boot_interrupt);
2041DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
2042 quirk_disable_intel_boot_interrupt);
2043DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
2044 quirk_disable_intel_boot_interrupt);
2045DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
2046 quirk_disable_intel_boot_interrupt);
2047DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
2048 quirk_disable_intel_boot_interrupt);
2049DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
2050 quirk_disable_intel_boot_interrupt);
2051DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
2052 quirk_disable_intel_boot_interrupt);
2053DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
2054 quirk_disable_intel_boot_interrupt);
2055DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
2056 quirk_disable_intel_boot_interrupt);
2057DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
2058 quirk_disable_intel_boot_interrupt);
2059
2060
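/*
 * Disable boot interrupts on the ServerWorks/Broadcom HT-1000 southbridge:
 * temporarily expose the PIC index/data registers and clear its
 * boot-interrupt IRQ map entries (indices 0x10-0x2f).
 */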
2061#define BC_HT1000_FEATURE_REG 0x64
2062#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
2063#define BC_HT1000_MAP_IDX 0xC00
2064#define BC_HT1000_MAP_DATA 0xC01
2065
2066static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
2067{
2068 u32 pci_config_dword;
2069 u8 irq;
2070
2071 if (noioapicquirk)
2072 return;
2073
2074 pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
2075 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
2076 BC_HT1000_PIC_REGS_ENABLE);
2077
2078 for (irq = 0x10; irq < 0x10 + 32; irq++) {
2079 outb(irq, BC_HT1000_MAP_IDX);
2080 outb(0x00, BC_HT1000_MAP_DATA);
2081 }
2082
2083 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
2084
2085 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2086 dev->vendor, dev->device);
2087}
2088DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
2089DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
2090
2091
2092
2093
2094
2095
2096
2097
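/*
 * Disable boot interrupts on AMD-8131/8132 PCI-X bridges by clearing the
 * NOIOAMODE bit.  Revisions B1 and B2 are left alone.
 */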
2098#define AMD_813X_MISC 0x40
2099#define AMD_813X_NOIOAMODE (1<<0)
2100#define AMD_813X_REV_B1 0x12
2101#define AMD_813X_REV_B2 0x13
2102
2103static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
2104{
2105 u32 pci_config_dword;
2106
2107 if (noioapicquirk)
2108 return;
2109 if ((dev->revision == AMD_813X_REV_B1) ||
2110 (dev->revision == AMD_813X_REV_B2))
2111 return;
2112
2113 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
2114 pci_config_dword &= ~AMD_813X_NOIOAMODE;
2115 pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
2116
2117 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2118 dev->vendor, dev->device);
2119}
2120DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2121DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2122DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2123DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2124
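/*
 * Disable boot interrupts on the AMD-8111 SMBus controller by clearing its
 * PCI IRQ routing register.
 */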
2125#define AMD_8111_PCI_IRQ_ROUTING 0x56
2126
2127static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
2128{
2129 u16 pci_config_word;
2130
2131 if (noioapicquirk)
2132 return;
2133
2134 pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
2135 if (!pci_config_word) {
2136 pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
2137 dev->vendor, dev->device);
2138 return;
2139 }
2140 pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
2141 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
2142 dev->vendor, dev->device);
2143}
2144DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2145DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2146#endif
2147
2148
2149
2150
2151
2152
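/*
 * The Toshiba TC86C001 IDE controller reportedly misdecodes BAR 0 when it
 * is assigned an address with bit 3 set; mark the resource unset so it is
 * reassigned to a properly aligned 16-byte range.
 */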
2153static void quirk_tc86c001_ide(struct pci_dev *dev)
2154{
2155 struct resource *r = &dev->resource[0];
2156
2157 if (r->start & 0x8) {
2158 r->flags |= IORESOURCE_UNSET;
2159 r->start = 0;
2160 r->end = 0xf;
2161 }
2162}
2163DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
2164 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
2165 quirk_tc86c001_ide);
2166
2167
2168
2169
2170
2171
2172
2173
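/*
 * Early PLX PCI 9050 revisions (< 2) reportedly fail to decode a 128-byte
 * BAR whose assigned address has bit 7 set.  Request reassignment of such
 * BARs to a 256-byte window so bit 7 of the base is guaranteed clear.
 */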
2174static void quirk_plx_pci9050(struct pci_dev *dev)
2175{
2176 unsigned int bar;
2177
2178
2179 if (dev->revision >= 2)
2180 return;
2181 for (bar = 0; bar <= 1; bar++)
2182 if (pci_resource_len(dev, bar) == 0x80 &&
2183 (pci_resource_start(dev, bar) & 0x80)) {
2184 struct resource *r = &dev->resource[bar];
2185 pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
2186 bar);
2187 r->flags |= IORESOURCE_UNSET;
2188 r->start = 0;
2189 r->end = 0xff;
2190 }
2191}
2192DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2193 quirk_plx_pci9050);
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
2204DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
2205
2206static void quirk_netmos(struct pci_dev *dev)
2207{
2208 unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
2209 unsigned int num_serial = dev->subsystem_device & 0xf;
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
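	/*
	 * The subsystem device ID encodes the port counts computed above:
	 * low nibble = serial ports, next nibble = parallel ports.  Parts
	 * with parallel ports are reclassified as COMMUNICATION_OTHER so
	 * parport_serial, rather than the plain serial driver, binds them.
	 */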
2221 switch (dev->device) {
2222 case PCI_DEVICE_ID_NETMOS_9835:
2223
2224 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
2225 dev->subsystem_device == 0x0299)
2226 return;
2227 fallthrough;
2228 case PCI_DEVICE_ID_NETMOS_9735:
2229 case PCI_DEVICE_ID_NETMOS_9745:
2230 case PCI_DEVICE_ID_NETMOS_9845:
2231 case PCI_DEVICE_ID_NETMOS_9855:
2232 if (num_parallel) {
2233 pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
2234 dev->device, num_parallel, num_serial);
2235 dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
2236 (dev->class & 0xff);
2237 }
2238 }
2239}
2240DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
2241 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
2242
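/*
 * Some firmware leaves the e100 with its interrupt enabled at the device
 * before any driver is loaded, which can flood a shared interrupt line.
 * If the NIC is in D0 and memory decoding is on, mask the interrupt via
 * the CSR.
 */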
2243static void quirk_e100_interrupt(struct pci_dev *dev)
2244{
2245 u16 command, pmcsr;
2246 u8 __iomem *csr;
2247 u8 cmd_hi;
2248
2249 switch (dev->device) {
2250
2251 case 0x1029:
2252 case 0x1030 ... 0x1034:
2253 case 0x1038 ... 0x103E:
2254 case 0x1050 ... 0x1057:
2255 case 0x1059:
2256 case 0x1064 ... 0x106B:
2257 case 0x1091 ... 0x1095:
2258 case 0x1209:
2259 case 0x1229:
2260 case 0x2449:
2261 case 0x2459:
2262 case 0x245D:
2263 case 0x27DC:
2264 break;
2265 default:
2266 return;
2267 }
2268
2269
2270
2271
2272
2273
2274
2275
2276 pci_read_config_word(dev, PCI_COMMAND, &command);
2277
2278 if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
2279 return;
2280
2281
2282
2283
2284
2285 if (dev->pm_cap) {
2286 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2287 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
2288 return;
2289 }
2290
2291
2292 csr = ioremap(pci_resource_start(dev, 0), 8);
2293 if (!csr) {
2294 pci_warn(dev, "Can't map e100 registers\n");
2295 return;
2296 }
2297
2298 cmd_hi = readb(csr + 3);
2299 if (cmd_hi == 0) {
2300 pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
2301 writeb(1, csr + 3);
2302 }
2303
2304 iounmap(csr);
2305}
2306DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
2307 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
2308
2309
2310
2311
2312
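/*
 * The Intel NICs below are reported to corrupt data when coming out of
 * ASPM L0s, so keep L0s disabled on their links.
 */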
2313static void quirk_disable_aspm_l0s(struct pci_dev *dev)
2314{
2315 pci_info(dev, "Disabling L0s\n");
2316 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
2317}
2318DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
2319DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
2320DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
2321DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
2322DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
2323DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
2324DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
2325DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
2326DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
2327DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
2328DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
2329DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
2330DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
2331DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2332
2333static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
2334{
2335 pci_info(dev, "Disabling ASPM L0s/L1\n");
2336 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2337}
2338
2339
2340
2341
2342
2343
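/*
 * The ASMedia ASM1083/1085 PCIe-to-PCI bridge reportedly misbehaves with
 * ASPM enabled, so disable both L0s and L1 on it.
 */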
2344DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
2345
2346
2347
2348
2349
2350
2351
2352
2353
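/*
 * Some Pericom PCIe-to-PCI bridges (vendor 0x12d8) reportedly need the
 * Retrain Link bit cleared again after it is set, or link retraining never
 * completes; flag them so the PCIe core does that.
 */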
2354static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
2355{
2356 dev->clear_retrain_link = 1;
2357 pci_info(dev, "Enable PCIe Retrain Link quirk\n");
2358}
2359DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
2360DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
2361DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
2362
2363static void fixup_rev1_53c810(struct pci_dev *dev)
2364{
2365 u32 class = dev->class;
2366
2367
2368
2369
2370
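	/*
	 * Rev 1 NCR 53c810 chips leave the class code at zero, which keeps
	 * them from getting their resources remapped; force SCSI class.
	 */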
2371 if (class)
2372 return;
2373
2374 dev->class = PCI_CLASS_STORAGE_SCSI << 8;
2375 pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
2376 class, dev->class);
2377}
2378DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
2379
2380
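/*
 * If the Intel P64H2 bridge has its EN1K bit set, its I/O window decodes
 * with 1 KB granularity; record that so window setup can use it.
 */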
2381static void quirk_p64h2_1k_io(struct pci_dev *dev)
2382{
2383 u16 en1k;
2384
2385 pci_read_config_word(dev, 0x40, &en1k);
2386
2387 if (en1k & 0x200) {
2388 pci_info(dev, "Enable I/O Space to 1KB granularity\n");
2389 dev->io_window_1k = 1;
2390 }
2391}
2392DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2393
2394
2395
2396
2397
2398
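/*
 * On the NVIDIA CK804 PCIe port, bit 5 of config byte 0xf41 apparently must
 * be set for the AER extended capability to be linked into the capability
 * chain; set it if the firmware left it clear.
 */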
2399static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
2400{
2401 uint8_t b;
2402
2403 if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
2404 if (!(b & 0x20)) {
2405 pci_write_config_byte(dev, 0xf41, b | 0x20);
2406 pci_info(dev, "Linking AER extended capability\n");
2407 }
2408 }
2409}
2410DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2411 quirk_nvidia_ck804_pcie_aer_ext_cap);
2412DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2413 quirk_nvidia_ck804_pcie_aer_ext_cap);
2414
2415static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
2416{
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
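	/*
	 * Disable PCI bus parking and master read caching on the VIA CX700,
	 * which reportedly cause USB 2.0 packet loss with a VT6212L on the
	 * external PCI bus.  Only act if a second VT6212L (besides the one
	 * integrated in the CX700) is present, hence the double lookup below.
	 */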
2428 struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
2429 PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
2430 uint8_t b;
2431
2432
2433
2434
2435
2436 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
2437 if (!p)
2438 return;
2439 pci_dev_put(p);
2440
2441 if (pci_read_config_byte(dev, 0x76, &b) == 0) {
2442 if (b & 0x40) {
2443
2444 pci_write_config_byte(dev, 0x76, b ^ 0x40);
2445
2446 pci_info(dev, "Disabling VIA CX700 PCI parking\n");
2447 }
2448 }
2449
2450 if (pci_read_config_byte(dev, 0x72, &b) == 0) {
2451 if (b != 0) {
2452
2453 pci_write_config_byte(dev, 0x72, 0x0);
2454
2455
2456 pci_write_config_byte(dev, 0x75, 0x1);
2457
2458
2459 pci_write_config_byte(dev, 0x77, 0x0);
2460
2461 pci_info(dev, "Disabling VIA CX700 PCI caching\n");
2462 }
2463 }
2464}
2465DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2466
2467static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2468{
2469 u32 rev;
2470
2471 pci_read_config_dword(dev, 0xf4, &rev);
2472
2473
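	/* Only the 5719 A0 revision needs the MRRS capped at 2048 bytes. */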
2474 if (rev == 0x05719000) {
2475 int readrq = pcie_get_readrq(dev);
2476 if (readrq > 2048)
2477 pcie_set_readrq(dev, 2048);
2478 }
2479}
2480DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2481 PCI_DEVICE_ID_TIGON3_5719,
2482 quirk_brcm_5719_limit_mrrs);
2483
2484
2485
2486
2487
2488
2489
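/*
 * Intel 82865/82875 host bridges can hide device 6, the memory controller
 * "Overflow" device whose registers are reportedly needed by EDAC; flip
 * the bit that exposes it.
 */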
2490static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2491{
2492 u8 reg;
2493
2494	if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
2495 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2496 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2497 }
2498}
2499DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2500 quirk_unhide_mch_dev6);
2501DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2502 quirk_unhide_mch_dev6);
2503
2504#ifdef CONFIG_PCI_MSI
2505
2506
2507
2508
2509
2510
2511
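/*
 * The host chipsets below are known not to handle MSI correctly at all;
 * when one is present, turn MSI off globally.
 */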
2512static void quirk_disable_all_msi(struct pci_dev *dev)
2513{
2514 pci_no_msi();
2515 pci_warn(dev, "MSI quirk detected; MSI disabled\n");
2516}
2517DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
2518DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
2519DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
2520DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
2521DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
2522DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
2523DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
2524DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
2525
2526
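/*
 * These bridges forward MSI from devices behind them unreliably; disable
 * MSI for everything on their secondary bus.
 */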
2527static void quirk_disable_msi(struct pci_dev *dev)
2528{
2529 if (dev->subordinate) {
2530 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2531 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2532 }
2533}
2534DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2535DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2536DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2537
2538
2539
2540
2541
2542
2543
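/*
 * The AMD 780-family APC bridge (device 0x9602 at slot 1) reportedly
 * carries an OEM-specific subsystem ID, so it cannot be matched directly;
 * match the host bridge instead and look the APC bridge up by slot.
 */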
2544static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
2545{
2546 struct pci_dev *apc_bridge;
2547
2548 apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
2549 if (apc_bridge) {
2550 if (apc_bridge->device == 0x9602)
2551 quirk_disable_msi(apc_bridge);
2552 pci_dev_put(apc_bridge);
2553 }
2554}
2555DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
2556DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2557
2558
2559
2560
2561
2562static int msi_ht_cap_enabled(struct pci_dev *dev)
2563{
2564 int pos, ttl = PCI_FIND_CAP_TTL;
2565
2566 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2567 while (pos && ttl--) {
2568 u8 flags;
2569
2570 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2571 &flags) == 0) {
2572 pci_info(dev, "Found %s HT MSI Mapping\n",
2573 flags & HT_MSI_FLAGS_ENABLE ?
2574 "enabled" : "disabled");
2575 return (flags & HT_MSI_FLAGS_ENABLE) != 0;
2576 }
2577
2578 pos = pci_find_next_ht_capability(dev, pos,
2579 HT_CAPTYPE_MSI_MAPPING);
2580 }
2581 return 0;
2582}
2583
2584
2585static void quirk_msi_ht_cap(struct pci_dev *dev)
2586{
2587 if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
2588 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2589 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2590 }
2591}
2592DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
2593 quirk_msi_ht_cap);
2594
2595
2596
2597
2598
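/*
 * The NVIDIA CK804 may expose its HT MSI mapping either on the bridge
 * itself or on the device at slot 0 of its bus; MSI is usable if either
 * mapping is enabled.
 */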
2599static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
2600{
2601 struct pci_dev *pdev;
2602
2603 if (!dev->subordinate)
2604 return;
2605
2606
2607
2608
2609
2610 pdev = pci_get_slot(dev->bus, 0);
2611 if (!pdev)
2612 return;
2613 if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
2614 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2615 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2616 }
2617 pci_dev_put(pdev);
2618}
2619DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2620 quirk_nvidia_ck804_msi_ht_cap);
2621
2622
2623static void ht_enable_msi_mapping(struct pci_dev *dev)
2624{
2625 int pos, ttl = PCI_FIND_CAP_TTL;
2626
2627 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2628 while (pos && ttl--) {
2629 u8 flags;
2630
2631 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2632 &flags) == 0) {
2633 pci_info(dev, "Enabling HT MSI Mapping\n");
2634
2635 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2636 flags | HT_MSI_FLAGS_ENABLE);
2637 }
2638 pos = pci_find_next_ht_capability(dev, pos,
2639 HT_CAPTYPE_MSI_MAPPING);
2640 }
2641}
2642DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
2643 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
2644 ht_enable_msi_mapping);
2645DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
2646 ht_enable_msi_mapping);
2647
2648
2649
2650
2651
2652
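/*
 * MSI for the on-board MCP55 NIC is reportedly broken on the ASUS
 * P5N32-SLI boards matched below, so turn it off there.
 */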
2653static void nvenet_msi_disable(struct pci_dev *dev)
2654{
2655 const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
2656
2657 if (board_name &&
2658 (strstr(board_name, "P5N32-SLI PREMIUM") ||
2659 strstr(board_name, "P5N32-E SLI"))) {
2660 pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
2661 dev->no_msi = 1;
2662 }
2663}
2664DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2665 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2666 nvenet_msi_disable);
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
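/*
 * NVIDIA Tegra PCIe root ports reportedly deliver PME and AER events only
 * via INTx, not MSI, so mark them as not supporting MSI.
 */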
2677static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
2678{
2679 dev->no_msi = 1;
2680}
2681DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0,
2682 PCI_CLASS_BRIDGE_PCI, 8,
2683 pci_quirk_nvidia_tegra_disable_rp_msi);
2684DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1,
2685 PCI_CLASS_BRIDGE_PCI, 8,
2686 pci_quirk_nvidia_tegra_disable_rp_msi);
2687DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2,
2688 PCI_CLASS_BRIDGE_PCI, 8,
2689 pci_quirk_nvidia_tegra_disable_rp_msi);
2690DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0,
2691 PCI_CLASS_BRIDGE_PCI, 8,
2692 pci_quirk_nvidia_tegra_disable_rp_msi);
2693DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1,
2694 PCI_CLASS_BRIDGE_PCI, 8,
2695 pci_quirk_nvidia_tegra_disable_rp_msi);
2696DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c,
2697 PCI_CLASS_BRIDGE_PCI, 8,
2698 pci_quirk_nvidia_tegra_disable_rp_msi);
2699DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d,
2700 PCI_CLASS_BRIDGE_PCI, 8,
2701 pci_quirk_nvidia_tegra_disable_rp_msi);
2702DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12,
2703 PCI_CLASS_BRIDGE_PCI, 8,
2704 pci_quirk_nvidia_tegra_disable_rp_msi);
2705DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13,
2706 PCI_CLASS_BRIDGE_PCI, 8,
2707 pci_quirk_nvidia_tegra_disable_rp_msi);
2708DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae,
2709 PCI_CLASS_BRIDGE_PCI, 8,
2710 pci_quirk_nvidia_tegra_disable_rp_msi);
2711DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf,
2712 PCI_CLASS_BRIDGE_PCI, 8,
2713 pci_quirk_nvidia_tegra_disable_rp_msi);
2714DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
2715 PCI_CLASS_BRIDGE_PCI, 8,
2716 pci_quirk_nvidia_tegra_disable_rp_msi);
2717DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
2718 PCI_CLASS_BRIDGE_PCI, 8,
2719 pci_quirk_nvidia_tegra_disable_rp_msi);
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
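/*
 * Some NVIDIA MCP55 bridges have a legacy IRQ routing register that, if
 * misprogrammed by firmware, sends legacy interrupts only to the boot CPU
 * (which notably breaks kdump kernels); clear the offending bits.
 */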
2731static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2732{
2733 u32 cfg;
2734
2735 if (!pci_find_capability(dev, PCI_CAP_ID_HT))
2736 return;
2737
2738 pci_read_config_dword(dev, 0x74, &cfg);
2739
2740 if (cfg & ((1 << 2) | (1 << 15))) {
2741 pr_info("Rewriting IRQ routing register on MCP55\n");
2742 cfg &= ~((1 << 2) | (1 << 15));
2743 pci_write_config_dword(dev, 0x74, cfg);
2744 }
2745}
2746DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2747 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
2748 nvbridge_check_legacy_irq_routing);
2749DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2750 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
2751 nvbridge_check_legacy_irq_routing);
2752
2753static int ht_check_msi_mapping(struct pci_dev *dev)
2754{
2755 int pos, ttl = PCI_FIND_CAP_TTL;
2756 int found = 0;
2757
2758
2759 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2760 while (pos && ttl--) {
2761 u8 flags;
2762
2763 if (found < 1)
2764 found = 1;
2765 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2766 &flags) == 0) {
2767 if (flags & HT_MSI_FLAGS_ENABLE) {
2768 if (found < 2) {
2769 found = 2;
2770 break;
2771 }
2772 }
2773 }
2774 pos = pci_find_next_ht_capability(dev, pos,
2775 HT_CAPTYPE_MSI_MAPPING);
2776 }
2777
2778 return found;
2779}
2780
2781static int host_bridge_with_leaf(struct pci_dev *host_bridge)
2782{
2783 struct pci_dev *dev;
2784 int pos;
2785 int i, dev_no;
2786 int found = 0;
2787
2788 dev_no = host_bridge->devfn >> 3;
2789 for (i = dev_no + 1; i < 0x20; i++) {
2790 dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
2791 if (!dev)
2792 continue;
2793
2794
2795 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2796 if (pos != 0) {
2797 pci_dev_put(dev);
2798 break;
2799 }
2800
2801 if (ht_check_msi_mapping(dev)) {
2802 found = 1;
2803 pci_dev_put(dev);
2804 break;
2805 }
2806 pci_dev_put(dev);
2807 }
2808
2809 return found;
2810}
2811
2812#define PCI_HT_CAP_SLAVE_CTRL0 4
2813#define PCI_HT_CAP_SLAVE_CTRL1 8
2814
2815static int is_end_of_ht_chain(struct pci_dev *dev)
2816{
2817 int pos, ctrl_off;
2818 int end = 0;
2819 u16 flags, ctrl;
2820
2821 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2822
2823 if (!pos)
2824 goto out;
2825
2826 pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
2827
2828 ctrl_off = ((flags >> 10) & 1) ?
2829 PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
2830 pci_read_config_word(dev, pos + ctrl_off, &ctrl);
2831
2832 if (ctrl & (1 << 6))
2833 end = 1;
2834
2835out:
2836 return end;
2837}
2838
2839static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
2840{
2841 struct pci_dev *host_bridge;
2842 int pos;
2843 int i, dev_no;
2844 int found = 0;
2845
2846 dev_no = dev->devfn >> 3;
2847 for (i = dev_no; i >= 0; i--) {
2848 host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
2849 if (!host_bridge)
2850 continue;
2851
2852 pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
2853 if (pos != 0) {
2854 found = 1;
2855 break;
2856 }
2857 pci_dev_put(host_bridge);
2858 }
2859
2860 if (!found)
2861 return;
2862
2863
2864 if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
2865 host_bridge_with_leaf(host_bridge))
2866 goto out;
2867
2868
2869 if (msi_ht_cap_enabled(host_bridge))
2870 goto out;
2871
2872 ht_enable_msi_mapping(dev);
2873
2874out:
2875 pci_dev_put(host_bridge);
2876}
2877
2878static void ht_disable_msi_mapping(struct pci_dev *dev)
2879{
2880 int pos, ttl = PCI_FIND_CAP_TTL;
2881
2882 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2883 while (pos && ttl--) {
2884 u8 flags;
2885
2886 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2887 &flags) == 0) {
2888 pci_info(dev, "Disabling HT MSI Mapping\n");
2889
2890 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2891 flags & ~HT_MSI_FLAGS_ENABLE);
2892 }
2893 pos = pci_find_next_ht_capability(dev, pos,
2894 HT_CAPTYPE_MSI_MAPPING);
2895 }
2896}
2897
2898static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
2899{
2900 struct pci_dev *host_bridge;
2901 int pos;
2902 int found;
2903
2904 if (!pci_msi_enabled())
2905 return;
2906
2907
2908 found = ht_check_msi_mapping(dev);
2909
2910
2911 if (found == 0)
2912 return;
2913
2914
2915
2916
2917
2918 host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
2919 PCI_DEVFN(0, 0));
2920 if (host_bridge == NULL) {
2921 pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
2922 return;
2923 }
2924
2925 pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
2926 if (pos != 0) {
2927
2928 if (found == 1) {
2929
2930 if (all)
2931 ht_enable_msi_mapping(dev);
2932 else
2933 nv_ht_enable_msi_mapping(dev);
2934 }
2935 goto out;
2936 }
2937
2938
2939 if (found == 1)
2940 goto out;
2941
2942
2943 ht_disable_msi_mapping(dev);
2944
2945out:
2946 pci_dev_put(host_bridge);
2947}
2948
2949static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
2950{
2951 return __nv_msi_ht_cap_quirk(dev, 1);
2952}
2953DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2954DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2955
2956static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2957{
2958 return __nv_msi_ht_cap_quirk(dev, 0);
2959}
2960DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2961DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2962
2963static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
2964{
2965 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2966}
2967
2968static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
2969{
2970 struct pci_dev *p;
2971
2972
2973
2974
2975
2976
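	/*
	 * Only SB700 revisions 0x30-0x3A, identified via the SMBus
	 * controller's revision ID, need the INTx-disable workaround;
	 * later revisions reportedly fix it in hardware.
	 */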
2977 p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
2978 NULL);
2979 if (!p)
2980 return;
2981
2982 if ((p->revision < 0x3B) && (p->revision >= 0x30))
2983 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2984 pci_dev_put(p);
2985}
2986
2987static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
2988{
2989
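	/* Reportedly fixed in hardware from revision 0x18 onward. */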
2990 if (dev->revision < 0x18) {
2991 pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
2992 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2993 }
2994}
2995DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2996 PCI_DEVICE_ID_TIGON3_5780,
2997 quirk_msi_intx_disable_bug);
2998DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2999 PCI_DEVICE_ID_TIGON3_5780S,
3000 quirk_msi_intx_disable_bug);
3001DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3002 PCI_DEVICE_ID_TIGON3_5714,
3003 quirk_msi_intx_disable_bug);
3004DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3005 PCI_DEVICE_ID_TIGON3_5714S,
3006 quirk_msi_intx_disable_bug);
3007DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3008 PCI_DEVICE_ID_TIGON3_5715,
3009 quirk_msi_intx_disable_bug);
3010DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3011 PCI_DEVICE_ID_TIGON3_5715S,
3012 quirk_msi_intx_disable_bug);
3013
3014DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
3015 quirk_msi_intx_disable_ati_bug);
3016DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
3017 quirk_msi_intx_disable_ati_bug);
3018DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
3019 quirk_msi_intx_disable_ati_bug);
3020DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
3021 quirk_msi_intx_disable_ati_bug);
3022DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
3023 quirk_msi_intx_disable_ati_bug);
3024
3025DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
3026 quirk_msi_intx_disable_bug);
3027DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
3028 quirk_msi_intx_disable_bug);
3029DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
3030 quirk_msi_intx_disable_bug);
3031
3032DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
3033 quirk_msi_intx_disable_bug);
3034DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
3035 quirk_msi_intx_disable_bug);
3036DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
3037 quirk_msi_intx_disable_bug);
3038DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
3039 quirk_msi_intx_disable_bug);
3040DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
3041 quirk_msi_intx_disable_bug);
3042DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
3043 quirk_msi_intx_disable_bug);
3044DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
3045 quirk_msi_intx_disable_qca_bug);
3046DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
3047 quirk_msi_intx_disable_qca_bug);
3048DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
3049 quirk_msi_intx_disable_qca_bug);
3050DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
3051 quirk_msi_intx_disable_qca_bug);
3052DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
3053 quirk_msi_intx_disable_qca_bug);
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
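/*
 * Amazon Annapurna Labs root ports (0x0031) advertise MSI/MSI-X but it
 * reportedly does not work there; force legacy INTx instead.
 */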
3065static void quirk_al_msi_disable(struct pci_dev *dev)
3066{
3067 dev->no_msi = 1;
3068 pci_warn(dev, "Disabling MSI/MSI-X\n");
3069}
3070DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
3071 PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable);
3072#endif
3073
3074
3075
3076
3077
3078
3079
3080
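/*
 * Mark bridges that can have devices hot-added behind them but do not
 * advertise it themselves; the HINT 0x0020 bridge below is one such part.
 */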
3081static void quirk_hotplug_bridge(struct pci_dev *dev)
3082{
3083 dev->is_hotplug_bridge = 1;
3084}
3085DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
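/*
 * Some Ricoh multifunction chips carry a proprietary MMC controller next
 * to the standard SDHCI one.  The fixups below disable the proprietary
 * controller via magic config-space sequences so MMC cards are handled by
 * the standard SDHCI driver; see the notices they print.
 */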
3112#ifdef CONFIG_MMC_RICOH_MMC
3113static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
3114{
3115 u8 write_enable;
3116 u8 write_target;
3117 u8 disable;
3118
3119
3120
3121
3122
3123
3124 if (PCI_FUNC(dev->devfn))
3125 return;
3126
3127 pci_read_config_byte(dev, 0xB7, &disable);
3128 if (disable & 0x02)
3129 return;
3130
3131 pci_read_config_byte(dev, 0x8E, &write_enable);
3132 pci_write_config_byte(dev, 0x8E, 0xAA);
3133 pci_read_config_byte(dev, 0x8D, &write_target);
3134 pci_write_config_byte(dev, 0x8D, 0xB7);
3135 pci_write_config_byte(dev, 0xB7, disable | 0x02);
3136 pci_write_config_byte(dev, 0x8E, write_enable);
3137 pci_write_config_byte(dev, 0x8D, write_target);
3138
3139 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
3140 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
3141}
3142DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
3143DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
3144
3145static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
3146{
3147 u8 write_enable;
3148 u8 disable;
3149
3150
3151
3152
3153
3154
3155 if (PCI_FUNC(dev->devfn))
3156 return;
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
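	/*
	 * The R5CE822/R5CE823 readers reportedly fail to recognize some
	 * cards unless the controller's base clock is dropped to 50 MHz;
	 * the unlock/write/lock sequence below does that.
	 */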
3169 if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
3170 dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
3171 pci_write_config_byte(dev, 0xf9, 0xfc);
3172 pci_write_config_byte(dev, 0x150, 0x10);
3173 pci_write_config_byte(dev, 0xf9, 0x00);
3174 pci_write_config_byte(dev, 0xfc, 0x01);
3175 pci_write_config_byte(dev, 0xe1, 0x32);
3176 pci_write_config_byte(dev, 0xfc, 0x00);
3177
3178		pci_notice(dev, "MMC controller base frequency changed to 50 MHz.\n");
3179 }
3180
3181 pci_read_config_byte(dev, 0xCB, &disable);
3182
3183 if (disable & 0x02)
3184 return;
3185
3186 pci_read_config_byte(dev, 0xCA, &write_enable);
3187 pci_write_config_byte(dev, 0xCA, 0x57);
3188 pci_write_config_byte(dev, 0xCB, disable | 0x02);
3189 pci_write_config_byte(dev, 0xCA, write_enable);
3190
3191 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
3192 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
3193
3194}
3195DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
3196DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
3197DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
3198DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
3199DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
3200DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
3201#endif
3202
3203#ifdef CONFIG_DMAR_TABLE
3204#define VTUNCERRMSK_REG 0x1ac
3205#define VTD_MSK_SPEC_ERRORS (1 << 31)
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
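/*
 * On some Intel chipsets, VT-d faults are also escalated to platform error
 * logic and can reportedly end up as NMIs/SMIs that hang the system.  The
 * VT-d driver already handles these faults, so mask the spec-defined
 * errors at this device and keep them out of the platform path.
 */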
3216static void vtd_mask_spec_errors(struct pci_dev *dev)
3217{
3218 u32 word;
3219
3220 pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
3221 pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
3222}
3223DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
3224DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
3225#endif
3226
3227static void fixup_ti816x_class(struct pci_dev *dev)
3228{
3229 u32 class = dev->class;
3230
3231
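	/* TI816x devices reportedly leave the class code undefined in PCIe boot mode. */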
3232 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
3233 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
3234 class, dev->class);
3235}
3236DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
3237 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
3238
3239
3240
3241
3242
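/*
 * The Solarflare SFC4000 parts below reportedly misbehave with a payload
 * larger than 256 bytes, so cap their advertised MPSS.
 */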
3243static void fixup_mpss_256(struct pci_dev *dev)
3244{
3245 dev->pcie_mpss = 1;
3246}
3247DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3248 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
3249DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3250 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
3251DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3252 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
3253
3254
3255
3256
3257
3258
3259
3260
3261
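/*
 * Some Intel memory controller hubs have an erratum where read completion
 * coalescing (often enabled by BIOS) misbehaves with a 256-byte MPS.
 * Since the fabric MPS isn't known until enumeration finishes, simply turn
 * the coalescing off whenever MPS tuning is in use.
 */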
3262static void quirk_intel_mc_errata(struct pci_dev *dev)
3263{
3264 int err;
3265 u16 rcc;
3266
3267 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
3268 pcie_bus_config == PCIE_BUS_DEFAULT)
3269 return;
3270
3271
3272
3273
3274
3275
3276 err = pci_read_config_word(dev, 0x48, &rcc);
3277 if (err) {
3278 pci_err(dev, "Error attempting to read the read completion coalescing register\n");
3279 return;
3280 }
3281
3282 if (!(rcc & (1 << 10)))
3283 return;
3284
3285 rcc &= ~(1 << 10);
3286
3287 err = pci_write_config_word(dev, 0x48, rcc);
3288 if (err) {
3289 pci_err(dev, "Error attempting to write the read completion coalescing register\n");
3290 return;
3291 }
3292
3293 pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
3294}
3295
3296DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
3297DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
3298DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
3299DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
3300DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
3301DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
3302DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
3303DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
3304DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
3305DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
3306DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
3307DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
3308DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
3309DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
3310
3311DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
3312DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
3313DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
3314DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
3315DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
3316DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
3317DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
3318DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
3319DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
3320DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
3321DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3322
3323
3324
3325
3326
3327
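/*
 * These Intel NTB devices reportedly misreport the sizes of BARs 2 and 4;
 * the real sizes are published as powers of two in config registers
 * 0xD0/0xD1, so rewrite the resource ends accordingly.
 */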
3328static void quirk_intel_ntb(struct pci_dev *dev)
3329{
3330 int rc;
3331 u8 val;
3332
3333 rc = pci_read_config_byte(dev, 0x00D0, &val);
3334 if (rc)
3335 return;
3336
3337 dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
3338
3339 rc = pci_read_config_byte(dev, 0x00D1, &val);
3340 if (rc)
3341 return;
3342
3343 dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
3344}
3345DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
3346DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
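/*
 * Some BIOSes leave Intel GPU display-engine interrupts enabled even when
 * no driver is loaded, and the resulting spurious interrupts can get the
 * shared line disabled.  Clear the display-engine interrupt enable
 * register (DEIER) if it was left on.
 */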
3360#define I915_DEIER_REG 0x4400c
3361static void disable_igfx_irq(struct pci_dev *dev)
3362{
3363 void __iomem *regs = pci_iomap(dev, 0, 0);
3364 if (regs == NULL) {
3365 pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
3366 return;
3367 }
3368
3369
3370 if (readl(regs + I915_DEIER_REG) != 0) {
3371 pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
3372
3373 writel(0, regs + I915_DEIER_REG);
3374 }
3375
3376 pci_iounmap(dev, regs);
3377}
3378DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
3379DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
3380DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
3381DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
3382DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
3383DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
3384DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3385
3386
3387
3388
3389
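/*
 * The Intel devices below reportedly do not need the default delay when
 * transitioning out of D3hot, so drop it to speed up resume.
 */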
3390static void quirk_remove_d3hot_delay(struct pci_dev *dev)
3391{
3392 dev->d3hot_delay = 0;
3393}
3394
3395DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay);
3396DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay);
3397DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay);
3398
3399DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay);
3400DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay);
3401DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay);
3402DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay);
3403DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay);
3404DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay);
3405DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay);
3406DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay);
3407DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay);
3408DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay);
3409DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay);
3410
3411DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay);
3412DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay);
3413DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay);
3414DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay);
3415DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay);
3416DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay);
3417DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay);
3418DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay);
3419DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay);
3420
3421
3422
3423
3424
3425
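/*
 * Devices flagged here pass the generic INTx-masking probe (the command
 * register bit is writable) but don't actually mask the interrupt, so
 * users of INTx masking such as vfio-pci must not rely on it.
 */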
3426static void quirk_broken_intx_masking(struct pci_dev *dev)
3427{
3428 dev->broken_intx_masking = 1;
3429}
3430DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
3431 quirk_broken_intx_masking);
3432DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601,
3433 quirk_broken_intx_masking);
3434DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004,
3435 quirk_broken_intx_masking);
3436
3437
3438
3439
3440
3441
3442
3443DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
3444 quirk_broken_intx_masking);
3445
3446
3447
3448
3449
3450DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
3451DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
3452DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
3453DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
3454DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
3455DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
3456DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
3457DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
3458DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
3459DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
3460DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
3461DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
3462DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
3463DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
3464DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
3465DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
3466
3467static u16 mellanox_broken_intx_devs[] = {
3468 PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
3469 PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
3470 PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
3471 PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
3472 PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
3473 PCI_DEVICE_ID_MELLANOX_HERMON_EN,
3474 PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
3475 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
3476 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
3477 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
3478 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
3479 PCI_DEVICE_ID_MELLANOX_CONNECTX2,
3480 PCI_DEVICE_ID_MELLANOX_CONNECTX3,
3481 PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
3482};
3483
3484#define CONNECTX_4_CURR_MAX_MINOR 99
3485#define CONNECTX_4_INTX_SUPPORT_MINOR 14
3486
3487
3488
3489
3490
3491
3492
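/*
 * Older Mellanox ConnectX parts are flagged as having broken INTx masking
 * unconditionally.  For ConnectX-4/ConnectX-4 Lx, read the firmware
 * version from the initialization segment in BAR 0 and only flag firmware
 * that predates working INTx masking support.
 */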
3493static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
3494{
3495 __be32 __iomem *fw_ver;
3496 u16 fw_major;
3497 u16 fw_minor;
3498 u16 fw_subminor;
3499 u32 fw_maj_min;
3500 u32 fw_sub_min;
3501 int i;
3502
3503 for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
3504 if (pdev->device == mellanox_broken_intx_devs[i]) {
3505 pdev->broken_intx_masking = 1;
3506 return;
3507 }
3508 }
3509
3510
3511
3512
3513
3514 if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
3515 return;
3516
3517 if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
3518 pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
3519 return;
3520
3521
3522 if (pci_enable_device_mem(pdev)) {
3523 pci_warn(pdev, "Can't enable device memory\n");
3524 return;
3525 }
3526
3527 fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
3528 if (!fw_ver) {
3529 pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
3530 goto out;
3531 }
3532
3533
3534 fw_maj_min = ioread32be(fw_ver);
3535 fw_sub_min = ioread32be(fw_ver + 1);
3536 fw_major = fw_maj_min & 0xffff;
3537 fw_minor = fw_maj_min >> 16;
3538 fw_subminor = fw_sub_min & 0xffff;
3539 if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
3540 fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
3541 pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
3542 fw_major, fw_minor, fw_subminor, pdev->device ==
3543 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
3544 pdev->broken_intx_masking = 1;
3545 }
3546
3547 iounmap(fw_ver);
3548
3549out:
3550 pci_disable_device(pdev);
3551}
3552DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
3553 mellanox_check_broken_intx_masking);
3554
3555static void quirk_no_bus_reset(struct pci_dev *dev)
3556{
3557 dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
3558}
3559
3560
3561
3562
3563
3564
3565
3566
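/*
 * Several Atheros wifi chips reportedly do not come back after a secondary
 * bus reset: config space stays inaccessible and the system may hang, so
 * forbid bus resets for them.
 */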
3567DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
3568DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
3569DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
3570DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
3571DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
3572
3573
3574
3575
3576
3577
3578DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
3579
3580static void quirk_no_pm_reset(struct pci_dev *dev)
3581{
3582
3583
3584
3585
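	/*
	 * Leave root-bus devices alone: they have no bus-reset fallback, so
	 * even a PM reset of dubious value is better than nothing there.
	 */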
3586 if (!pci_is_root_bus(dev->bus))
3587 dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
3588}
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
3599 PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3600
3601
3602
3603
3604
3605
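/*
 * First-generation Intel Thunderbolt controllers (plus Cactus Ridge 4C up
 * to revision 1 and Port Ridge) reportedly signal hotplug events
 * unreliably via MSI, so fall back to INTx on their hotplug bridges.
 */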
3606static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
3607{
3608 if (pdev->is_hotplug_bridge &&
3609 (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
3610 pdev->revision <= 1))
3611 pdev->no_msi = 1;
3612}
3613DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
3614 quirk_thunderbolt_hotplug_msi);
3615DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
3616 quirk_thunderbolt_hotplug_msi);
3617DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
3618 quirk_thunderbolt_hotplug_msi);
3619DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
3620 quirk_thunderbolt_hotplug_msi);
3621DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
3622 quirk_thunderbolt_hotplug_msi);
3623
3624#ifdef CONFIG_ACPI
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
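/*
 * On Apple machines, cut power to the Cactus Ridge Thunderbolt controller
 * before suspend by invoking the SXIO/SXFP/SXLV ACPI methods below;
 * otherwise the controller reportedly comes back in a bad state after
 * resume.
 */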
3640static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
3641{
3642 acpi_handle bridge, SXIO, SXFP, SXLV;
3643
3644 if (!x86_apple_machine)
3645 return;
3646 if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
3647 return;
3648 bridge = ACPI_HANDLE(&dev->dev);
3649 if (!bridge)
3650 return;
3651
3652
3653
3654
3655
3656
3657
3658
3659 if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
3660 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
3661 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
3662 return;
3663 pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");
3664
3665
3666 acpi_execute_simple_method(SXIO, NULL, 1);
3667 acpi_execute_simple_method(SXFP, NULL, 0);
3668 msleep(300);
3669 acpi_execute_simple_method(SXLV, NULL, 0);
3670 acpi_execute_simple_method(SXIO, NULL, 0);
3671 acpi_execute_simple_method(SXLV, NULL, 0);
3672}
3673DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
3674 PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
3675 quirk_apple_poweroff_thunderbolt);
3676#endif
3677
3678
3679
3680
3681
3682
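/*
 * Device-specific reset methods, used when the generic methods (FLR, PM
 * reset, etc.) are unsuitable for a particular device; see the
 * pci_dev_reset_methods[] table below.
 */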
3683static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
3684{
3685
3686
3687
3688
3689
3690
3691
3692
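	/*
	 * The 82599 supports FLR on its VFs but reportedly advertises it
	 * only in the PF's device capabilities, so issue the FLR directly
	 * instead of checking for support first.
	 */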
3693 if (!probe)
3694 pcie_flr(dev);
3695 return 0;
3696}
3697
3698#define SOUTH_CHICKEN2 0xc2004
3699#define PCH_PP_STATUS 0xc7200
3700#define PCH_PP_CONTROL 0xc7204
3701#define MSG_CTL 0x45010
3702#define NSDE_PWR_STATE 0xd0100
3703#define IGD_OPERATION_TIMEOUT 10000
3704
3705static int reset_ivb_igd(struct pci_dev *dev, int probe)
3706{
3707 void __iomem *mmio_base;
3708 unsigned long timeout;
3709 u32 val;
3710
3711 if (probe)
3712 return 0;
3713
3714 mmio_base = pci_iomap(dev, 0, 0);
3715 if (!mmio_base)
3716 return -ENOMEM;
3717
3718 iowrite32(0x00000002, mmio_base + MSG_CTL);
3719
3720
3721
3722
3723
3724
3725
3726 iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
3727
3728 val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
3729 iowrite32(val, mmio_base + PCH_PP_CONTROL);
3730
3731 timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
3732 do {
3733 val = ioread32(mmio_base + PCH_PP_STATUS);
3734 if ((val & 0xb0000000) == 0)
3735 goto reset_complete;
3736 msleep(10);
3737 } while (time_before(jiffies, timeout));
3738 pci_warn(dev, "timeout during reset\n");
3739
3740reset_complete:
3741 iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
3742
3743 pci_iounmap(dev, mmio_base);
3744 return 0;
3745}
3746
3747
3748static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
3749{
3750 u16 old_command;
3751 u16 msix_flags;
3752
3753
3754
3755
3756
3757 if ((dev->device & 0xf000) != 0x4000)
3758 return -ENOTTY;
3759
3760
3761
3762
3763
3764 if (probe)
3765 return 0;
3766
3767
3768
3769
3770
3771
3772
3773 pci_read_config_word(dev, PCI_COMMAND, &old_command);
3774 pci_write_config_word(dev, PCI_COMMAND,
3775 old_command | PCI_COMMAND_MASTER);
3776
3777
3778
3779
3780
3781 pci_save_state(dev);
3782
3783
3784
3785
3786
3787
3788
3789
3790 pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
3791 if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
3792 pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
3793 msix_flags |
3794 PCI_MSIX_FLAGS_ENABLE |
3795 PCI_MSIX_FLAGS_MASKALL);
3796
3797 pcie_flr(dev);
3798
3799
3800
3801
3802
3803
3804 pci_restore_state(dev);
3805 pci_write_config_word(dev, PCI_COMMAND, old_command);
3806 return 0;
3807}
3808
3809#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
3810#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
3811#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
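/*
 * Some NVMe controllers (e.g. the Samsung part wired to this method in the
 * reset table below) reportedly misbehave if an FLR arrives while the
 * controller is still enabled.  Disable the controller and wait for
 * CSTS.RDY to clear before issuing the FLR.
 */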
3826static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
3827{
3828 void __iomem *bar;
3829 u16 cmd;
3830 u32 cfg;
3831
3832 if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
3833 !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
3834 return -ENOTTY;
3835
3836 if (probe)
3837 return 0;
3838
3839 bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
3840 if (!bar)
3841 return -ENOTTY;
3842
3843 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3844 pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
3845
3846 cfg = readl(bar + NVME_REG_CC);
3847
3848
3849 if (cfg & NVME_CC_ENABLE) {
3850 u32 cap = readl(bar + NVME_REG_CAP);
3851 unsigned long timeout;
3852
3853
3854
3855
3856
3857
3858 cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
3859
3860 writel(cfg, bar + NVME_REG_CC);
3861
3862
3863
3864
3865
3866
3867
3868
3869 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
3870
3871 for (;;) {
3872 u32 status = readl(bar + NVME_REG_CSTS);
3873
3874
3875 if (!(status & NVME_CSTS_RDY))
3876 break;
3877
3878 msleep(100);
3879
3880 if (time_after(jiffies, timeout)) {
3881 pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
3882 break;
3883 }
3884 }
3885 }
3886
3887 pci_iounmap(dev, bar);
3888
3889 pcie_flr(dev);
3890
3891 return 0;
3892}
3893
3894
3895
3896
3897
3898
3899
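/*
 * The Intel NVMe device using this method in the table below reportedly
 * needs a settling delay after FLR before it can be re-enabled reliably;
 * 250 ms is an empirically chosen value.
 */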
3900static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
3901{
3902 if (!pcie_has_flr(dev))
3903 return -ENOTTY;
3904
3905 if (probe)
3906 return 0;
3907
3908 pcie_flr(dev);
3909
3910 msleep(250);
3911
3912 return 0;
3913}
3914
3915static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
3916 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
3917 reset_intel_82599_sfp_virtfn },
3918 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
3919 reset_ivb_igd },
3920 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
3921 reset_ivb_igd },
3922 { PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
3923 { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
3924 { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
3925 reset_chelsio_generic_dev },
3926 { 0 }
3927};
3928
3929
3930
3931
3932
3933
3934int pci_dev_specific_reset(struct pci_dev *dev, int probe)
3935{
3936 const struct pci_dev_reset_methods *i;
3937
3938 for (i = pci_dev_reset_methods; i->reset; i++) {
3939 if ((i->vendor == dev->vendor ||
3940 i->vendor == (u16)PCI_ANY_ID) &&
3941 (i->device == dev->device ||
3942 i->device == (u16)PCI_ANY_ID))
3943 return i->reset(dev, probe);
3944 }
3945
3946 return -ENOTTY;
3947}
3948
3949static void quirk_dma_func0_alias(struct pci_dev *dev)
3950{
3951 if (PCI_FUNC(dev->devfn) != 0)
3952 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
3953}
3954
3955
3956
3957
3958
3959
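/*
 * The Ricoh devices below issue DMA tagged with the requester ID of
 * function 0, so alias their other functions to devfn 0 for the IOMMU.
 */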
3960DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
3961DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
3962
3963static void quirk_dma_func1_alias(struct pci_dev *dev)
3964{
3965 if (PCI_FUNC(dev->devfn) != 1)
3966 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
3967}
3968
3969
3970
3971
3972
3973
3974
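/*
 * The controllers below (mostly Marvell 88SE9xxx-based SATA parts, plus a
 * few rebadged devices declared after them) issue DMA with the requester
 * ID of function 1 regardless of which function the transaction belongs
 * to, so alias the other functions to devfn 1.
 */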
3975DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
3976 quirk_dma_func1_alias);
3977DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
3978 quirk_dma_func1_alias);
3979DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
3980 quirk_dma_func1_alias);
3981
3982DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
3983 quirk_dma_func1_alias);
3984DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
3985 quirk_dma_func1_alias);
3986
3987DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
3988 quirk_dma_func1_alias);
3989
3990DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
3991 quirk_dma_func1_alias);
3992
3993DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
3994 quirk_dma_func1_alias);
3995
3996DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
3997 quirk_dma_func1_alias);
3998
3999DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
4000 quirk_dma_func1_alias);
4001
4002DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
4003 quirk_dma_func1_alias);
4004
4005DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
4006 quirk_dma_func1_alias);
4007DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
4008 quirk_dma_func1_alias);
4009DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
4010 quirk_dma_func1_alias);
4011
4012DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
4013 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
4014 quirk_dma_func1_alias);
4015
4016 DECLARE_PCI_FIXUP_HEADER(0x1c28, /* Lite-On */
4017 0x0122, /* Plextor M6E (Marvell 88SS9183) */
4018 quirk_dma_func1_alias);
4034
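/*
 * Some devices DMA with a fixed requester ID that does not match the devfn
 * they are enumerated at.  The table below maps such devices (matched by
 * subsystem ID) to the devfn their DMA actually comes from, and
 * quirk_fixed_dma_alias() adds that devfn as a DMA alias.
 */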
4035static const struct pci_device_id fixed_dma_alias_tbl[] = {
4036 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
4037 PCI_VENDOR_ID_ADAPTEC2, 0x02bb),
4038 .driver_data = PCI_DEVFN(1, 0) },
4039 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
4040 PCI_VENDOR_ID_ADAPTEC2, 0x02bc),
4041 .driver_data = PCI_DEVFN(1, 0) },
4042 { 0 }
4043};
4044
4045static void quirk_fixed_dma_alias(struct pci_dev *dev)
4046{
4047 const struct pci_device_id *id;
4048
4049 id = pci_match_id(fixed_dma_alias_tbl, dev);
4050 if (id)
4051 pci_add_dma_alias(dev, id->driver_data, 1);
4052}
4053DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
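/*
 * A few PCIe-to-PCI bridges fail to expose a PCIe capability, so they look
 * like conventional PCI bridges to the DMA alias code.  When such a device
 * sits directly below a PCIe device that is not itself a PCIe-to-PCI
 * bridge, flag it so requester IDs are aliased to the bridge as they would
 * be for a proper PCIe-to-PCI bridge.
 */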
4064static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
4065{
4066 if (!pci_is_root_bus(pdev->bus) &&
4067 pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
4068 !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
4069 pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
4070 pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
4071}
4072
4073DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
4074 quirk_use_pcie_bridge_dma_alias);
4075
4076DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
4077
4078DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
4079
4080DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);
4081
4082DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
4083
4084
4085
4086
4087
4088
4089
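/*
 * The Intel MIC x200 NTB forwards PCIe traffic using several proxy
 * requester IDs (devfns 10.0, 11.0 and 12.3); alias them all to the NTB
 * device so the IOMMU accepts its DMA.
 */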
4090static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
4091{
4092 pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
4093 pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
4094 pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
4095}
4096DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
4097DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
4112
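/*
 * The Intel Visual Compute Accelerator (VCA) exposes its compute nodes
 * through a non-transparent bridge, and their DMA can arrive with a proxy
 * requester ID using any slot number (0-31) and functions 0-4.  Alias
 * every slot/function combination in that range so the IOMMU accepts it.
 */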
4113static void quirk_pex_vca_alias(struct pci_dev *pdev)
4114{
4115 const unsigned int num_pci_slots = 0x20;
4116 unsigned int slot;
4117
4118 for (slot = 0; slot < num_pci_slots; slot++)
4119 pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
4120}
4121DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
4122DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
4123DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
4124DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
4125DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
4126DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
4127
4128
4129
4130
4131
4132
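/*
 * On Broadcom Vulcan / Cavium ThunderX2 root complexes the PCIe-to-PCI
 * bridge performs the requester ID translation itself, so the DMA alias
 * walk should stop at the bridge instead of continuing towards the root
 * bus.  PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT marks the bridge accordingly.
 */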
4133static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
4134{
4135 pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
4136}
4137DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
4138 quirk_bridge_cavm_thrx2_pcie_root);
4139DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
4140 quirk_bridge_cavm_thrx2_pcie_root);
4141
4142
4143
4144
4145
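/*
 * Intersil/Techwell TW686x video capture cards report a class code of 0
 * (PCI_CLASS_NOT_DEFINED); override it to a Multimedia controller class so
 * a driver can bind.
 */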
4146static void quirk_tw686x_class(struct pci_dev *pdev)
4147{
4148 u32 class = pdev->class;
4149
4150
4151 pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
4152 pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
4153 class, pdev->class);
4154}
4155DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
4156 quirk_tw686x_class);
4157DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
4158 quirk_tw686x_class);
4159DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
4160 quirk_tw686x_class);
4161DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
4162 quirk_tw686x_class);
4163
4164
4165
4166
4167
4168
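/*
 * Devices flagged with PCI_DEV_FLAGS_NO_RELAXED_ORDERING have trouble with
 * upstream TLPs carrying the Relaxed Ordering attribute; drivers check the
 * flag (via pcie_relaxed_ordering_enabled()) before using RO.  The long
 * list of Intel IDs below covers Xeon (Haswell/Broadwell-EP era) root
 * complex devices with such an erratum.
 */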
4169static void quirk_relaxedordering_disable(struct pci_dev *dev)
4170{
4171 dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
4172 pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
4173}
4174
4175
4176
4177
4178
4179
4180DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
4181 quirk_relaxedordering_disable);
4182DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
4183 quirk_relaxedordering_disable);
4184DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
4185 quirk_relaxedordering_disable);
4186DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
4187 quirk_relaxedordering_disable);
4188DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
4189 quirk_relaxedordering_disable);
4190DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
4191 quirk_relaxedordering_disable);
4192DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
4193 quirk_relaxedordering_disable);
4194DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
4195 quirk_relaxedordering_disable);
4196DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
4197 quirk_relaxedordering_disable);
4198DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
4199 quirk_relaxedordering_disable);
4200DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
4201 quirk_relaxedordering_disable);
4202DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
4203 quirk_relaxedordering_disable);
4204DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
4205 quirk_relaxedordering_disable);
4206DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
4207 quirk_relaxedordering_disable);
4208DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
4209 quirk_relaxedordering_disable);
4210DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
4211 quirk_relaxedordering_disable);
4212DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
4213 quirk_relaxedordering_disable);
4214DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
4215 quirk_relaxedordering_disable);
4216DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
4217 quirk_relaxedordering_disable);
4218DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
4219 quirk_relaxedordering_disable);
4220DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
4221 quirk_relaxedordering_disable);
4222DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
4223 quirk_relaxedordering_disable);
4224DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
4225 quirk_relaxedordering_disable);
4226DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
4227 quirk_relaxedordering_disable);
4228DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
4229 quirk_relaxedordering_disable);
4230DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
4231 quirk_relaxedordering_disable);
4232DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
4233 quirk_relaxedordering_disable);
4234DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
4235 quirk_relaxedordering_disable);
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
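/*
 * The AMD A1100 ("SEATTLE") root complex reportedly has the same problem
 * with Relaxed Ordering, so its root complex devices are flagged as well.
 */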
4246DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
4247 quirk_relaxedordering_disable);
4248DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
4249 quirk_relaxedordering_disable);
4250DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
4251 quirk_relaxedordering_disable);
4252
4275
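/*
 * Per PCIe r3.0, sec 2.2.9, a Completion must carry the same attributes as
 * the Request it answers.  Some non-compliant endpoints generate
 * Completions with zero attributes instead; the workaround is to clear the
 * Relaxed Ordering and No Snoop enables in the Root Port so every Request
 * (and therefore every expected Completion) uses zero attributes.  These
 * bits are only performance hints, so clearing them for the whole
 * hierarchy is functionally safe.
 */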
4276static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
4277{
4278 struct pci_dev *root_port = pcie_find_root_port(pdev);
4279
4280 if (!root_port) {
4281 pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
4282 return;
4283 }
4284
4285 pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
4286 dev_name(&pdev->dev));
4287 pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
4288 PCI_EXP_DEVCTL_RELAX_EN |
4289 PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
4290}
4291
4292
4293
4294
4295
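/*
 * The Chelsio T5 (device IDs 0x54xx) is such a device: it fails to copy
 * the Request attributes into its Completions, so disable the attributes
 * at its Root Port.
 */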
4296static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
4297{
4298
4299
4300
4301
4302
4303
4304 if ((pdev->device & 0xff00) == 0x5400)
4305 quirk_disable_root_port_attributes(pdev);
4306}
4307DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
4308 quirk_chelsio_T5_disable_root_port_attributes);
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
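/*
 * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
 * @acs_ctrl_req: bitmask of the ACS controls the caller wants
 * @acs_ctrl_ena: bitmask of ACS controls that are enabled, or provided
 *                implicitly by the hardware design
 *
 * Return 1 if every control requested in @acs_ctrl_req is present in
 * @acs_ctrl_ena, 0 otherwise.
 */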
4321static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
4322{
4323 if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
4324 return 1;
4325 return 0;
4326}
4352
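/*
 * The AMD/ATI southbridge functions listed in the table below reportedly
 * do not support peer-to-peer between functions when an AMD IOMMU is
 * present, which is equivalent to having ACS Request Redirect and
 * Completion Redirect enabled even though no ACS capability is exposed.
 * The presence of the IVRS ACPI table is used as the indication that an
 * AMD IOMMU exists.
 */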
4353static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
4354{
4355#ifdef CONFIG_ACPI
4356 struct acpi_table_header *header = NULL;
4357 acpi_status status;
4358
4359
4360 if (!dev->multifunction || !pci_is_root_bus(dev->bus))
4361 return -ENODEV;
4362
4363
4364 status = acpi_get_table("IVRS", 0, &header);
4365 if (ACPI_FAILURE(status))
4366 return -ENODEV;
4367
4368 acpi_put_table(header);
4369
4370
4371 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
4372
4373 return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
4374#else
4375 return -ENODEV;
4376#endif
4377}
4378
4379static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
4380{
4381 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4382 return false;
4383
4384 switch (dev->device) {
4385
4386
4387
4388
4389 case 0xa000 ... 0xa7ff:
4390 case 0xaf84:
4391 case 0xb884:
4392 return true;
4393 default:
4394 return false;
4395 }
4396}
4397
4398static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
4399{
4400 if (!pci_quirk_cavium_acs_match(dev))
4401 return -ENOTTY;
4402
4403
4404
4405
4406
4407
4408
4409
4410
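/*
 * Cavium Root Ports do not advertise an ACS capability, but the hardware
 * blocks peer-to-peer between ports, which is equivalent to having Source
 * Validation, Request Redirect, Completion Redirect and Upstream
 * Forwarding enabled.
 */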
4411 return pci_acs_ctrl_enabled(acs_flags,
4412 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4413}
4414
4415static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
4416{
4417
4418
4419
4420
4421
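/*
 * X-Gene Root Ports matching this quirk do not allow peer-to-peer
 * transactions, so treat SV/RR/CR/UF as implicitly enabled.
 */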
4422 return pci_acs_ctrl_enabled(acs_flags,
4423 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4424}
4425
4426
4427
4428
4429
4430
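/*
 * Zhaoxin Root Ports and Switch Downstream Ports in the device ID ranges
 * below have no ACS capability but do not allow peer-to-peer between
 * ports, so report SV/RR/CR/UF as implicitly enabled for them.
 */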
4431static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
4432{
4433 if (!pci_is_pcie(dev) ||
4434 ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
4435 (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
4436 return -ENOTTY;
4437
4438 switch (dev->device) {
4439 case 0x0710 ... 0x071e:
4440 case 0x0721:
4441 case 0x0723 ... 0x0732:
4442 return pci_acs_ctrl_enabled(acs_flags,
4443 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4444 }
4445
4446 return false;
4447}
4448
4449
4450
4451
4452
4453
4454
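/*
 * Many Intel PCH Root Ports provide ACS-like features to disable peer
 * transactions and validate bus numbers in requests, but do not expose a
 * standard ACS capability.  The device IDs below are the Root Ports known
 * to behave this way; the ACS equivalence is only claimed once
 * pci_quirk_enable_intel_pch_acs() has actually configured the hardware
 * (PCI_DEV_FLAGS_ACS_ENABLED_QUIRK).
 */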
4455static const u16 pci_quirk_intel_pch_acs_ids[] = {
4456
4457 0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
4458 0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
4459
4460 0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
4461 0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
4462
4463 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
4464 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
4465
4466 0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
4467 0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
4468
4469 0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
4470 0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
4471
4472 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
4473 0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
4474
4475 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
4476
4477 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
4478 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
4479
4480 0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
4481};
4482
4483static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4484{
4485 int i;
4486
4487
4488 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4489 return false;
4490
4491 for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
4492 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4493 return true;
4494
4495 return false;
4496}
4497
4498static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
4499{
4500 if (!pci_quirk_intel_pch_acs_match(dev))
4501 return -ENOTTY;
4502
4503 if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
4504 return pci_acs_ctrl_enabled(acs_flags,
4505 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4506
4507 return pci_acs_ctrl_enabled(acs_flags, 0);
4508}
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
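/*
 * The Qualcomm (and HXT) Root Ports using this quirk lack an ACS
 * capability, but the hardware reportedly does not allow peer-to-peer
 * between Root Ports, so SV/RR/CR/UF are treated as implicitly enabled.
 */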
4520static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4521{
4522 return pci_acs_ctrl_enabled(acs_flags,
4523 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4524}
4525
4526static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4527{
4528 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4529 return -ENOTTY;
4530
4531
4532
4533
4534
4535
4536
4537
4538
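/*
 * Amazon Annapurna Labs root ports have no ACS capability, but the
 * hardware does not allow peer-to-peer between root ports, so mask out
 * SV/RR/CR/UF as if they were implemented and enabled.
 */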
4539 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4540
4541 return acs_flags ? 0 : 1;
4542}
4543
4588
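/*
 * Sunrise Point (and later) PCH Root Ports implement ACS, but they use
 * dword-sized ACS capability and control registers, with the control
 * register at a non-standard offset, so the generic ACS path cannot read
 * them.  INTEL_SPT_ACS_CTRL below is the offset of the dword control
 * register; the quirks that follow access it directly.
 */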
4589static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4590{
4591 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4592 return false;
4593
4594 switch (dev->device) {
4595 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a:
4596 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee:
4597 case 0x9d10 ... 0x9d1b:
4598 return true;
4599 }
4600
4601 return false;
4602}
4603
4604#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
4605
4606static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
4607{
4608 int pos;
4609 u32 cap, ctrl;
4610
4611 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4612 return -ENOTTY;
4613
4614 pos = dev->acs_cap;
4615 if (!pos)
4616 return -ENOTTY;
4617
4618
4619 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4620 acs_flags &= (cap | PCI_ACS_EC);
4621
4622 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4623
4624 return pci_acs_ctrl_enabled(acs_flags, ctrl);
4625}
4626
4627static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
4628{
4629
4630
4631
4632
4633
4634
4635
4636
4637
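/*
 * SV, TB and UF are not relevant to multifunction endpoints.  The devices
 * matched by this quirk do not do peer-to-peer between their functions, so
 * RR, CR and DT can also be treated as implicitly provided even though no
 * ACS capability is exposed.
 */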
4638 return pci_acs_ctrl_enabled(acs_flags,
4639 PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
4640 PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
4641}
4642
4643static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
4644{
4645
4646
4647
4648
4649
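/*
 * Intel Root Complex Integrated Endpoints only do peer-to-peer on
 * IOMMU-translated addresses, so treat SV/RR/CR/UF as enabled for them.
 */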
4650 if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
4651 return -ENOTTY;
4652
4653 return pci_acs_ctrl_enabled(acs_flags,
4654 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4655}
4656
4657static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
4658{
4659
4660
4661
4662
4663
4664
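/*
 * Broadcom iProc Root Ports do not advertise an ACS capability but do not
 * allow peer-to-peer between Root Ports, so report SV/RR/CR/UF as
 * implicitly enabled.
 */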
4665 return pci_acs_ctrl_enabled(acs_flags,
4666 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4667}
4668
4669static const struct pci_dev_acs_enabled {
4670 u16 vendor;
4671 u16 device;
4672 int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
4673} pci_dev_acs_enabled[] = {
4674 { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
4675 { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
4676 { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
4677 { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
4678 { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
4679 { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
4680 { PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
4681 { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
4682 { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
4683 { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
4684 { PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
4685 { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
4686 { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
4687 { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
4688 { PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
4689 { PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
4690 { PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
4691 { PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
4692 { PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
4693 { PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
4694 { PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
4695 { PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
4696 { PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
4697 { PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
4698 { PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
4699 { PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
4700 { PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
4701 { PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
4702 { PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
4703 { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
4704 { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
4705
4706 { PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
4707 { PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
4708 { PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
4709 { PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
4710 { PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
4711 { PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
4712 { PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
4713
4714 { PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
4715 { PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
4716 { PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
4717 { PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
4718 { PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
4719 { PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
4720 { PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
4721 { PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
4722
4723 { PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
4724 { PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
4725 { PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
4726
4727 { PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
4728 { PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
4729 { PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
4730 { PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
4731
4732 { PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
4733 { PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
4734 { PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
4735 { PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
4736
4737 { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
4738 { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
4739 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
4740
4741 { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
4742 { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
4743
4744 { PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
4745
4746 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
4747 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
4748 { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs },
4749 { 0x10df, 0x720, pci_quirk_mf_endpoint_acs },
4750
4751 { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
4752
4753 { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
4754
4755 { PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
4756 { PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
4757 { PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
4758 { PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
4759 { PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
4760 { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
4761 { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
4762 { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
4763 { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
4764
4765 { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
4766
4767 { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
4768 { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
4769 { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
4770
4771 { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
4772 { 0 }
4773};
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785
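/*
 * pci_dev_specific_acs_enabled - check whether a quirk can vouch for
 * ACS-equivalent isolation on @dev
 * @dev:       PCI device to check
 * @acs_flags: bitmask of the desired ACS controls
 *
 * Returns -ENOTTY if no quirk applies (the caller falls back to the real
 * ACS capability), 0 if a quirk applies but the device does not provide
 * all the requested controls, and a positive value if it does.
 */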
4786int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
4787{
4788 const struct pci_dev_acs_enabled *i;
4789 int ret;
4790
4791
4792
4793
4794
4795
4796
4797 for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
4798 if ((i->vendor == dev->vendor ||
4799 i->vendor == (u16)PCI_ANY_ID) &&
4800 (i->device == dev->device ||
4801 i->device == (u16)PCI_ANY_ID)) {
4802 ret = i->acs_enabled(dev, acs_flags);
4803 if (ret >= 0)
4804 return ret;
4805 }
4806 }
4807
4808 return -ENOTTY;
4809}
4810
4811
4812#define INTEL_LPC_RCBA_REG 0xf0
4813
4814#define INTEL_LPC_RCBA_MASK 0xffffc000
4815
4816#define INTEL_LPC_RCBA_ENABLE (1 << 0)
4817
4818
4819#define INTEL_BSPR_REG 0x1104
4820
4821#define INTEL_BSPR_REG_BPNPD (1 << 8)
4822
4823#define INTEL_BSPR_REG_BPPD (1 << 9)
4824
4825
4826#define INTEL_UPDCR_REG 0x1014
4827
4828#define INTEL_UPDCR_REG_MASK 0x3f
4829
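/*
 * Read the Root Complex Base Address from the LPC bridge (D31:F0), map the
 * chipset register space and, unless backbone peer accesses are already
 * disabled via BSPR, clear the UPDCR peer decode enables so peer-to-peer
 * between PCH Root Ports is blocked.
 */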
4830static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
4831{
4832 u32 rcba, bspr, updcr;
4833 void __iomem *rcba_mem;
4834
4835
4836
4837
4838
4839
4840 pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
4841 INTEL_LPC_RCBA_REG, &rcba);
4842 if (!(rcba & INTEL_LPC_RCBA_ENABLE))
4843 return -EINVAL;
4844
4845 rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
4846 PAGE_ALIGN(INTEL_UPDCR_REG));
4847 if (!rcba_mem)
4848 return -ENOMEM;
4849
4850
4851
4852
4853
4854
4855
4856
4857 bspr = readl(rcba_mem + INTEL_BSPR_REG);
4858 bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
4859 if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
4860 updcr = readl(rcba_mem + INTEL_UPDCR_REG);
4861 if (updcr & INTEL_UPDCR_REG_MASK) {
4862 pci_info(dev, "Disabling UPDCR peer decodes\n");
4863 updcr &= ~INTEL_UPDCR_REG_MASK;
4864 writel(updcr, rcba_mem + INTEL_UPDCR_REG);
4865 }
4866 }
4867
4868 iounmap(rcba_mem);
4869 return 0;
4870}
4871
4872
4873#define INTEL_MPC_REG 0xd8
4874
4875#define INTEL_MPC_REG_IRBNCE (1 << 26)
4876
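/*
 * Setting IRBNCE in the Root Port's MPC register makes the port validate
 * that inbound requester IDs fall within its bus number range, which is
 * the equivalent of ACS Source Validation.
 */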
4877static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
4878{
4879 u32 mpc;
4880
4881
4882
4883
4884
4885
4886
4887 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
4888 if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
4889 pci_info(dev, "Enabling MPC IRBNCE\n");
4890 mpc |= INTEL_MPC_REG_IRBNCE;
4891 pci_write_config_word(dev, INTEL_MPC_REG, (u16)mpc);
4892 }
4893}
4894
4895
4896
4897
4898
4899
4900
4901
4902static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
4903{
4904 if (!pci_quirk_intel_pch_acs_match(dev))
4905 return -ENOTTY;
4906
4907 if (pci_quirk_enable_intel_lpc_acs(dev)) {
4908 pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
4909 return 0;
4910 }
4911
4912 pci_quirk_enable_intel_rp_mpc_acs(dev);
4913
4914 dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
4915
4916 pci_info(dev, "Intel PCH root port ACS workaround enabled\n");
4917
4918 return 0;
4919}
4920
4921static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
4922{
4923 int pos;
4924 u32 cap, ctrl;
4925
4926 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4927 return -ENOTTY;
4928
4929 pos = dev->acs_cap;
4930 if (!pos)
4931 return -ENOTTY;
4932
4933 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4934 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4935
4936 ctrl |= (cap & PCI_ACS_SV);
4937 ctrl |= (cap & PCI_ACS_RR);
4938 ctrl |= (cap & PCI_ACS_CR);
4939 ctrl |= (cap & PCI_ACS_UF);
4940
4941 if (dev->external_facing || dev->untrusted)
4942 ctrl |= (cap & PCI_ACS_TB);
4943
4944 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4945
4946 pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
4947
4948 return 0;
4949}
4950
4951static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
4952{
4953 int pos;
4954 u32 cap, ctrl;
4955
4956 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4957 return -ENOTTY;
4958
4959 pos = dev->acs_cap;
4960 if (!pos)
4961 return -ENOTTY;
4962
4963 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4964 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4965
4966 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
4967
4968 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4969
4970 pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
4971
4972 return 0;
4973}
4974
4975static const struct pci_dev_acs_ops {
4976 u16 vendor;
4977 u16 device;
4978 int (*enable_acs)(struct pci_dev *dev);
4979 int (*disable_acs_redir)(struct pci_dev *dev);
4980} pci_dev_acs_ops[] = {
4981 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
4982 .enable_acs = pci_quirk_enable_intel_pch_acs,
4983 },
4984 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
4985 .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
4986 .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
4987 },
4988};
4989
4990int pci_dev_specific_enable_acs(struct pci_dev *dev)
4991{
4992 const struct pci_dev_acs_ops *p;
4993 int i, ret;
4994
4995 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
4996 p = &pci_dev_acs_ops[i];
4997 if ((p->vendor == dev->vendor ||
4998 p->vendor == (u16)PCI_ANY_ID) &&
4999 (p->device == dev->device ||
5000 p->device == (u16)PCI_ANY_ID) &&
5001 p->enable_acs) {
5002 ret = p->enable_acs(dev);
5003 if (ret >= 0)
5004 return ret;
5005 }
5006 }
5007
5008 return -ENOTTY;
5009}
5010
5011int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
5012{
5013 const struct pci_dev_acs_ops *p;
5014 int i, ret;
5015
5016 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
5017 p = &pci_dev_acs_ops[i];
5018 if ((p->vendor == dev->vendor ||
5019 p->vendor == (u16)PCI_ANY_ID) &&
5020 (p->device == dev->device ||
5021 p->device == (u16)PCI_ANY_ID) &&
5022 p->disable_acs_redir) {
5023 ret = p->disable_acs_redir(dev);
5024 if (ret >= 0)
5025 return ret;
5026 }
5027 }
5028
5029 return -ENOTTY;
5030}
5031
5032
5033
5034
5035
5036
5037
5038
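/*
 * Intel QuickAssist (QAT) Virtual Functions carry a PCIe capability at
 * offset 0x50 that is not linked into the capability list (the MSI
 * capability is the last entry).  Detect that layout, wire up pcie_cap,
 * the PCIe flags/MPSS and the extended config space size, and allocate a
 * saved-state buffer so the PCIe registers are saved and restored like
 * they are for ordinary devices.
 */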
5039static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
5040{
5041 int pos, i = 0;
5042 u8 next_cap;
5043 u16 reg16, *cap;
5044 struct pci_cap_saved_state *state;
5045
5046
5047 if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
5048 return;
5049
5050
5051 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
5052 if (!pos)
5053 return;
5054
5055
5056
5057
5058
5059 pci_read_config_byte(pdev, pos + 1, &next_cap);
5060 if (next_cap)
5061 return;
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071 pos = 0x50;
5072 pci_read_config_word(pdev, pos, &reg16);
5073 if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
5074 u32 status;
5075#ifndef PCI_EXP_SAVE_REGS
5076#define PCI_EXP_SAVE_REGS 7
5077#endif
5078 int size = PCI_EXP_SAVE_REGS * sizeof(u16);
5079
5080 pdev->pcie_cap = pos;
5081 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
5082 pdev->pcie_flags_reg = reg16;
5083 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
5084 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
5085
5086 pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
5087 if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
5088 PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
5089 pdev->cfg_size = PCI_CFG_SPACE_SIZE;
5090
5091 if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
5092 return;
5093
5094
5095 state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
5096 if (!state)
5097 return;
5098
5099 state->cap.cap_nr = PCI_CAP_ID_EXP;
5100 state->cap.cap_extended = 0;
5101 state->cap.size = size;
5102 cap = (u16 *)&state->cap.data[0];
5103 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
5104 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
5105 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
5106 pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
5107 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
5108 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
5109 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
5110 hlist_add_head(&state->next, &pdev->saved_cap_space);
5111 }
5112}
5113DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
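/*
 * FLR reportedly hangs the AMD (Starship/Matisse) and Intel (82579)
 * devices below, so mark them with PCI_DEV_FLAGS_NO_FLR_RESET and let the
 * reset code pick a different method.
 */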
5125static void quirk_no_flr(struct pci_dev *dev)
5126{
5127 dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
5128}
5129DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
5130DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
5131DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
5132DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
5133DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
5134
5135static void quirk_no_ext_tags(struct pci_dev *pdev)
5136{
5137 struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
5138
5139 if (!bridge)
5140 return;
5141
5142 bridge->no_ext_tags = 1;
5143 pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");
5144
5145 pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
5146}
5147DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
5148DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
5149DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
5150DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
5151DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
5152DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
5153DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
5154
5155#ifdef CONFIG_PCI_ATS
5156
5157
5158
5159
5160
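/*
 * Certain AMD GPU variants (including only specific revisions of the
 * 0x7312 and 0x7340 parts checked below) reportedly do not implement ATS
 * correctly; clear ats_cap so the IOMMU never enables ATS for them.
 */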
5161static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
5162{
5163 if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
5164 (pdev->device == 0x7340 && pdev->revision != 0xc5))
5165 return;
5166
5167 pci_info(pdev, "disabling ATS\n");
5168 pdev->ats_cap = 0;
5169}
5170
5171
5172DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
5173
5174DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
5175
5176DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
5177
5178DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
5179#endif
5180
5181
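/*
 * The Freescale PCIe controller does not support MSI when operating as a
 * Root Complex, so disable MSI on its Root Ports.
 */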
5182static void quirk_fsl_no_msi(struct pci_dev *pdev)
5183{
5184 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
5185 pdev->no_msi = 1;
5186}
5187DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
5188
5189
5190
5191
5192
5193
5194
5195
5196
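/*
 * Although not allowed by the spec, some multi-function devices have a
 * power dependency of one function (the consumer) on another (the
 * supplier): the consumer only works in D0 if the supplier is in D0 too.
 * Create a stateless, runtime-PM device link to enforce that, and allow
 * runtime PM on the consumer so it does not keep the supplier awake
 * forever.
 */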
5197static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
5198 unsigned int supplier, unsigned int class,
5199 unsigned int class_shift)
5200{
5201 struct pci_dev *supplier_pdev;
5202
5203 if (PCI_FUNC(pdev->devfn) != consumer)
5204 return;
5205
5206 supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
5207 pdev->bus->number,
5208 PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
5209 if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
5210 pci_dev_put(supplier_pdev);
5211 return;
5212 }
5213
5214 if (device_link_add(&pdev->dev, &supplier_pdev->dev,
5215 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
5216 pci_info(pdev, "D0 power state depends on %s\n",
5217 pci_name(supplier_pdev));
5218 else
5219 pci_err(pdev, "Cannot enforce power dependency on %s\n",
5220 pci_name(supplier_pdev));
5221
5222 pm_runtime_allow(&pdev->dev);
5223 pci_dev_put(supplier_pdev);
5224}
5225
5226
5227
5228
5229
5230static void quirk_gpu_hda(struct pci_dev *hda)
5231{
5232 pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
5233}
5234DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
5235 PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
5236DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
5237 PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
5238DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
5239 PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
5240
5241
5242
5243
5244
5245static void quirk_gpu_usb(struct pci_dev *usb)
5246{
5247 pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
5248}
5249DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
5250 PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
5251
5252
5253
5254
5255
5256
5257
5258#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
5259static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
5260{
5261 pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
5262}
5263DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
5264 PCI_CLASS_SERIAL_UNKNOWN, 8,
5265 quirk_gpu_usb_typec_ucsi);
5266
5267
5268
5269
5270
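/*
 * The HDA controller integrated into NVIDIA GPUs can be left disabled by
 * the firmware.  Bit 25 of the config register at 0x488 enables it; set
 * the bit and re-read the header type so the audio function gets
 * enumerated as part of a multifunction device.
 */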
5271static void quirk_nvidia_hda(struct pci_dev *gpu)
5272{
5273 u8 hdr_type;
5274 u32 val;
5275
5276
5277 if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
5278 return;
5279
5280
5281 pci_read_config_dword(gpu, 0x488, &val);
5282 if (val & BIT(25))
5283 return;
5284
5285 pci_info(gpu, "Enabling HDA controller\n");
5286 pci_write_config_dword(gpu, 0x488, val | BIT(25));
5287
5288
5289 pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
5290 gpu->multifunction = !!(hdr_type & 0x80);
5291}
5292DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
5293 PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
5294DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
5295 PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
5321
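/*
 * Some IDT switches incorrectly apply ACS Source Validation to Completions
 * for config reads aimed at a device that has not yet captured its
 * bus/device number, so the very first config read of a device behind such
 * a switch can fail.  Work around it by temporarily disabling ACS SV on
 * the downstream port while probing for the Vendor ID, and issue a
 * harmless config write (the Vendor ID is read-only) once the device is
 * found so it captures its bus/device number.
 */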
5322int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
5323{
5324 int pos;
5325 u16 ctrl = 0;
5326 bool found;
5327 struct pci_dev *bridge = bus->self;
5328
5329 pos = bridge->acs_cap;
5330
5331
5332 if (pos) {
5333 pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
5334 if (ctrl & PCI_ACS_SV)
5335 pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
5336 ctrl & ~PCI_ACS_SV);
5337 }
5338
5339 found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
5340
5341
5342 if (found)
5343 pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);
5344
5345
5346 if (ctrl & PCI_ACS_SV)
5347 pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);
5348
5349 return found;
5350}
5351
5352
5353
5354
5355
5356
5357
5358
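/*
 * The Microsemi Switchtec NTB forwards traffic between partitions using
 * proxy requester IDs.  Read the requester ID table of every peer
 * partition from the NTB control registers and add each entry as a DMA
 * alias so the IOMMU accepts the proxied traffic.
 */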
5359static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
5360{
5361 void __iomem *mmio;
5362 struct ntb_info_regs __iomem *mmio_ntb;
5363 struct ntb_ctrl_regs __iomem *mmio_ctrl;
5364 u64 partition_map;
5365 u8 partition;
5366 int pp;
5367
5368 if (pci_enable_device(pdev)) {
5369 pci_err(pdev, "Cannot enable Switchtec device\n");
5370 return;
5371 }
5372
5373 mmio = pci_iomap(pdev, 0, 0);
5374 if (mmio == NULL) {
5375 pci_disable_device(pdev);
5376 pci_err(pdev, "Cannot iomap Switchtec device\n");
5377 return;
5378 }
5379
5380 pci_info(pdev, "Setting Switchtec proxy ID aliases\n");
5381
5382 mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
5383 mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
5384
5385 partition = ioread8(&mmio_ntb->partition_id);
5386
5387 partition_map = ioread32(&mmio_ntb->ep_map);
5388 partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
5389 partition_map &= ~(1ULL << partition);
5390
5391 for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
5392 struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
5393 u32 table_sz = 0;
5394 int te;
5395
5396 if (!(partition_map & (1ULL << pp)))
5397 continue;
5398
5399 pci_dbg(pdev, "Processing partition %d\n", pp);
5400
5401 mmio_peer_ctrl = &mmio_ctrl[pp];
5402
5403 table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
5404 if (!table_sz) {
5405 pci_warn(pdev, "Partition %d table_sz 0\n", pp);
5406 continue;
5407 }
5408
5409 if (table_sz > 512) {
5410 pci_warn(pdev,
5411 "Invalid Switchtec partition %d table_sz %d\n",
5412 pp, table_sz);
5413 continue;
5414 }
5415
5416 for (te = 0; te < table_sz; te++) {
5417 u32 rid_entry;
5418 u8 devfn;
5419
5420 rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
5421 devfn = (rid_entry >> 1) & 0xFF;
5422 pci_dbg(pdev,
5423 "Aliasing Partition %d Proxy ID %02x.%d\n",
5424 pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
5425 pci_add_dma_alias(pdev, devfn, 1);
5426 }
5427 }
5428
5429 pci_iounmap(pdev, mmio);
5430 pci_disable_device(pdev);
5431}
5432#define SWITCHTEC_QUIRK(vid) \
5433 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
5434 PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
5435
5436SWITCHTEC_QUIRK(0x8531);
5437SWITCHTEC_QUIRK(0x8532);
5438SWITCHTEC_QUIRK(0x8533);
5439SWITCHTEC_QUIRK(0x8534);
5440SWITCHTEC_QUIRK(0x8535);
5441SWITCHTEC_QUIRK(0x8536);
5442SWITCHTEC_QUIRK(0x8541);
5443SWITCHTEC_QUIRK(0x8542);
5444SWITCHTEC_QUIRK(0x8543);
5445SWITCHTEC_QUIRK(0x8544);
5446SWITCHTEC_QUIRK(0x8545);
5447SWITCHTEC_QUIRK(0x8546);
5448SWITCHTEC_QUIRK(0x8551);
5449SWITCHTEC_QUIRK(0x8552);
5450SWITCHTEC_QUIRK(0x8553);
5451SWITCHTEC_QUIRK(0x8554);
5452SWITCHTEC_QUIRK(0x8555);
5453SWITCHTEC_QUIRK(0x8556);
5454SWITCHTEC_QUIRK(0x8561);
5455SWITCHTEC_QUIRK(0x8562);
5456SWITCHTEC_QUIRK(0x8563);
5457SWITCHTEC_QUIRK(0x8564);
5458SWITCHTEC_QUIRK(0x8565);
5459SWITCHTEC_QUIRK(0x8566);
5460SWITCHTEC_QUIRK(0x8571);
5461SWITCHTEC_QUIRK(0x8572);
5462SWITCHTEC_QUIRK(0x8573);
5463SWITCHTEC_QUIRK(0x8574);
5464SWITCHTEC_QUIRK(0x8575);
5465SWITCHTEC_QUIRK(0x8576);
5466SWITCHTEC_QUIRK(0x4000);
5467SWITCHTEC_QUIRK(0x4084);
5468SWITCHTEC_QUIRK(0x4068);
5469SWITCHTEC_QUIRK(0x4052);
5470SWITCHTEC_QUIRK(0x4036);
5471SWITCHTEC_QUIRK(0x4028);
5472SWITCHTEC_QUIRK(0x4100);
5473SWITCHTEC_QUIRK(0x4184);
5474SWITCHTEC_QUIRK(0x4168);
5475SWITCHTEC_QUIRK(0x4152);
5476SWITCHTEC_QUIRK(0x4136);
5477SWITCHTEC_QUIRK(0x4128);
5478SWITCHTEC_QUIRK(0x4200);
5479SWITCHTEC_QUIRK(0x4284);
5480SWITCHTEC_QUIRK(0x4268);
5481SWITCHTEC_QUIRK(0x4252);
5482SWITCHTEC_QUIRK(0x4236);
5483SWITCHTEC_QUIRK(0x4228);
5484
5485
5486
5487
5488
5489
5490
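/*
 * The PLX NTB forwards traffic using proxy requester IDs that can be any
 * devfn on its bus, so alias all 256 devfns to the NTB device.
 */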
5491static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
5492{
5493 pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");
5494
5495 pci_add_dma_alias(pdev, 0, 256);
5496}
5497DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
5498DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
5515
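/*
 * On some Lenovo ThinkPad P50 variants the EFI firmware can leave the
 * discrete NVIDIA GPU initialized across a reboot, which leads to spurious
 * interrupts and a broken driver start.  If the GPU looks initialized
 * (bit 1 of the MMIO register at 0x2240c), reset the bus it sits on to
 * bring it back to a clean state.
 */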
5516static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5517{
5518 void __iomem *map;
5519 int ret;
5520
5521 if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
5522 pdev->subsystem_device != 0x222e ||
5523 !pdev->reset_fn)
5524 return;
5525
5526 if (pci_enable_device_mem(pdev))
5527 return;
5528
5529
5530
5531
5532
5533 map = pci_iomap(pdev, 0, 0x23000);
5534 if (!map) {
5535 pci_err(pdev, "Can't map MMIO space\n");
5536 goto out_disable;
5537 }
5538
5539
5540
5541
5542
5543 if (ioread32(map + 0x2240c) & 0x2) {
5544 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5545 ret = pci_reset_bus(pdev);
5546 if (ret < 0)
5547 pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5548 }
5549
5550 iounmap(map);
5551out_disable:
5552 pci_disable_device(pdev);
5553}
5554DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
5555 PCI_CLASS_DISPLAY_VGA, 8,
5556 quirk_reset_lenovo_thinkpad_p50_nvgpu);
5557
5558
5559
5560
5561
5562static void pci_fixup_no_d0_pme(struct pci_dev *dev)
5563{
5564 pci_info(dev, "PME# does not work under D0, disabling it\n");
5565 dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
5566}
5567DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
5568
5569
5570
5571
5572
5573
5574static void pci_fixup_no_pme(struct pci_dev *dev)
5575{
5576 pci_info(dev, "PME# is unreliable, disabling it\n");
5577 dev->pme_support = 0;
5578}
5579DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
5580DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
5581
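/*
 * This accelerator reports PCI_CLASS_NOT_DEFINED; give it a "System
 * peripheral - Other" class (keeping the programming interface) so drivers
 * and userspace can classify it sensibly.
 */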
5582static void apex_pci_fixup_class(struct pci_dev *pdev)
5583{
5584 pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | (pdev->class & 0xff);
5585}
5586DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
5587 PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
5588