1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/export.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/acpi.h>
22#include <linux/dmi.h>
23#include <linux/ioport.h>
24#include <linux/sched.h>
25#include <linux/ktime.h>
26#include <linux/mm.h>
27#include <linux/nvme.h>
28#include <linux/platform_data/x86/apple.h>
29#include <linux/pm_runtime.h>
30#include <linux/switchtec.h>
31#include <asm/dma.h>
32#include "pci.h"
33
/*
 * Log the start of a quirk invocation when booted with initcall_debug, and
 * return a timestamp for fixup_debug_report() to compute the duration.
 */
static ktime_t fixup_debug_start(struct pci_dev *dev,
				 void (*fn)(struct pci_dev *dev))
{
	if (initcall_debug)
		pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));

	return ktime_get();
}
42
43static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
44 void (*fn)(struct pci_dev *dev))
45{
46 ktime_t delta, rettime;
47 unsigned long long duration;
48
49 rettime = ktime_get();
50 delta = ktime_sub(rettime, calltime);
51 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
52 if (initcall_debug || duration > 10000)
53 pci_info(dev, "%pS took %lld usecs\n", fn, duration);
54}
55
/*
 * Walk the fixup table [f, end) and invoke every hook whose
 * class/vendor/device triple matches @dev.  PCI_ANY_ID acts as a wildcard
 * for each field; the class comparison is done after shifting dev->class
 * right by the entry's class_shift so entries can match on full class,
 * base class, or sub-class.  Each call is timed for initcall_debug.
 */
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
			  struct pci_fixup *end)
{
	ktime_t calltime;

	for (; f < end; f++)
		if ((f->class == (u32) (dev->class >> f->class_shift) ||
		     f->class == (u32) PCI_ANY_ID) &&
		    (f->vendor == dev->vendor ||
		     f->vendor == (u16) PCI_ANY_ID) &&
		    (f->device == dev->device ||
		     f->device == (u16) PCI_ANY_ID)) {
			void (*hook)(struct pci_dev *dev);
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
			/* Hook is stored as a 32-bit place-relative offset */
			hook = offset_to_ptr(&f->hook_offset);
#else
			hook = f->hook;
#endif
			calltime = fixup_debug_start(dev, hook);
			hook(dev);
			fixup_debug_report(dev, calltime, hook);
		}
}
79
80extern struct pci_fixup __start_pci_fixups_early[];
81extern struct pci_fixup __end_pci_fixups_early[];
82extern struct pci_fixup __start_pci_fixups_header[];
83extern struct pci_fixup __end_pci_fixups_header[];
84extern struct pci_fixup __start_pci_fixups_final[];
85extern struct pci_fixup __end_pci_fixups_final[];
86extern struct pci_fixup __start_pci_fixups_enable[];
87extern struct pci_fixup __end_pci_fixups_enable[];
88extern struct pci_fixup __start_pci_fixups_resume[];
89extern struct pci_fixup __end_pci_fixups_resume[];
90extern struct pci_fixup __start_pci_fixups_resume_early[];
91extern struct pci_fixup __end_pci_fixups_resume_early[];
92extern struct pci_fixup __start_pci_fixups_suspend[];
93extern struct pci_fixup __end_pci_fixups_suspend[];
94extern struct pci_fixup __start_pci_fixups_suspend_late[];
95extern struct pci_fixup __end_pci_fixups_suspend_late[];
96
/* Set once all devices have been enumerated; gates the "final" pass below */
static bool pci_apply_fixup_final_quirks;

/*
 * pci_fixup_device - run all registered quirks of one pass on a device
 * @pass: which fixup pass to run (selects a linker-section table built by
 *        the DECLARE_PCI_FIXUP_* macros)
 * @dev:  the device to fix up
 *
 * The pci_fixup_final pass is suppressed until pci_apply_final_quirks()
 * has flagged that enumeration is complete.
 */
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
	struct pci_fixup *start, *end;

	switch (pass) {
	case pci_fixup_early:
		start = __start_pci_fixups_early;
		end = __end_pci_fixups_early;
		break;

	case pci_fixup_header:
		start = __start_pci_fixups_header;
		end = __end_pci_fixups_header;
		break;

	case pci_fixup_final:
		if (!pci_apply_fixup_final_quirks)
			return;
		start = __start_pci_fixups_final;
		end = __end_pci_fixups_final;
		break;

	case pci_fixup_enable:
		start = __start_pci_fixups_enable;
		end = __end_pci_fixups_enable;
		break;

	case pci_fixup_resume:
		start = __start_pci_fixups_resume;
		end = __end_pci_fixups_resume;
		break;

	case pci_fixup_resume_early:
		start = __start_pci_fixups_resume_early;
		end = __end_pci_fixups_resume_early;
		break;

	case pci_fixup_suspend:
		start = __start_pci_fixups_suspend;
		end = __end_pci_fixups_suspend;
		break;

	case pci_fixup_suspend_late:
		start = __start_pci_fixups_suspend_late;
		end = __end_pci_fixups_suspend_late;
		break;

	default:
		/* stop compiler warning about unhandled passes */
		return;
	}
	pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
153
/*
 * Run the "final" quirk pass over every device discovered so far and settle
 * on a system-wide cache line size (CLS): keep an arch-chosen value if one
 * was set, otherwise adopt the value all devices agree on, falling back to
 * pci_dfl_cache_line_size on any mismatch.
 */
static int __init pci_apply_final_quirks(void)
{
	struct pci_dev *dev = NULL;
	u8 cls = 0;
	u8 tmp;

	if (pci_cache_line_size)
		pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);

	pci_apply_fixup_final_quirks = true;
	for_each_pci_dev(dev) {
		pci_fixup_device(pci_fixup_final, dev);
		/*
		 * If arch hasn't set it explicitly yet, use the CLS
		 * value shared by all PCI devices.  If there's a
		 * mismatch, fall back to the default value.
		 */
		if (!pci_cache_line_size) {
			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
			if (!cls)
				cls = tmp;
			if (!tmp || cls == tmp)
				continue;

			pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
			         cls << 2, tmp << 2,
			         pci_dfl_cache_line_size << 2);
			pci_cache_line_size = pci_dfl_cache_line_size;
		}
	}

	if (!pci_cache_line_size) {
		pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
			pci_dfl_cache_line_size << 2);
		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
	}

	return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
194
195
196
197
198
199
200
/*
 * Decoding should be disabled for a PCI device during BAR sizing to avoid
 * conflict.  But doing so may cause problems on host bridges and perhaps
 * other key system devices.  For devices that need to have MMIO decoding
 * always-on, we need to set the dev->mmio_always_on bit.
 */
static void quirk_mmio_always_on(struct pci_dev *dev)
{
	dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
207
208
209
210
211
212
/*
 * The Mellanox Tavor device gives false positive parity errors.  Disable
 * parity error reporting by marking the device's parity as broken.
 */
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
	dev->broken_parity_status = 1;	/* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
219
220
221
222
223
/*
 * Deal with broken BIOSes that neglect to enable passive release,
 * which can cause problems in combination with the 82441FX/PPro MTRRs.
 */
static void quirk_passive_release(struct pci_dev *dev)
{
	struct pci_dev *d = NULL;
	unsigned char dlc;

	/*
	 * We have to make sure a particular bit is set in the PIIX3
	 * ISA bridge, so we have to go out and find it.
	 */
	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
		pci_read_config_byte(d, 0x82, &dlc);
		if (!(dlc & 1<<1)) {
			pci_info(d, "PIIX3: Enabling Passive Release\n");
			dlc |= 1<<1;
			pci_write_config_byte(d, 0x82, dlc);
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
244
245
246
247
248
249
250
251
252
/*
 * The chipsets registered below cause hangs when an ISA DMA transfer is
 * in progress at the same time as PCI activity.  Setting the global
 * isa_dma_bridge_buggy flag makes the arch ISA DMA code apply its
 * workarounds.
 */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
	if (!isa_dma_bridge_buggy) {
		isa_dma_bridge_buggy = 1;
		pci_info(dev, "Activating ISA DMA hang workarounds\n");
	}
}
/*
 * It's not totally clear which chipsets are the problematic ones.  We know
 * 82C586 and 82C596 variants are affected.
 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
271
272
273
274
275
/*
 * Intel NM10 "TigerPoint" LPC: clear the stuck PM1a BM_STS bit the BIOS
 * left set, which otherwise prevents deep C-state entry (FW_BUG).
 */
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
	u32 pmbase;
	u16 pm1a;

	/* ACPI PM base lives in config dword 0x40; low 7 bits are flags */
	pci_read_config_dword(dev, 0x40, &pmbase);
	pmbase = pmbase & 0xff80;
	pm1a = inw(pmbase);

	if (pm1a & 0x10) {	/* BM_STS set */
		pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
		outw(0x10, pmbase);	/* write-one-to-clear */
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
291
292
/* Chipsets where PCI->PCI transfers vanish or hang */
static void quirk_nopcipci(struct pci_dev *dev)
{
	if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
		pci_info(dev, "Disabling direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_FAIL;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
302
/* AMD 8151 AGP bridge: rev 0x13 has an erratum affecting PCI/AGP transfers */
static void quirk_nopciamd(struct pci_dev *dev)
{
	u8 rev;
	pci_read_config_byte(dev, 0x08, &rev);
	if (rev == 0x13) {
		/* Erratum 24 */
		pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
		pci_pci_problems |= PCIAGP_FAIL;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
314
315
/* Triton requires workarounds to be used by the drivers */
static void quirk_triton(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_TRITON;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
327
328
329
330
331
332
333
334
335
336
337
/*
 * VIA Apollo KT133 needs a PCI latency patch when paired with certain
 * buggy VIA southbridge revisions: under high PCI load the CPU can be
 * held off the bus long enough to cause data loss (IDE corruption among
 * others).  Only applied when a 686B rev 0x40-0x42 or 8231 rev 0x10-0x12
 * southbridge is present.
 */
static void quirk_vialatency(struct pci_dev *dev)
{
	struct pci_dev *p;
	u8 busarb;

	/*
	 * Ok, we have a potential problem chipset here.  Now see if we
	 * have a buggy southbridge.
	 */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
	if (p != NULL) {

		/*
		 * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; only the
		 * 686B revisions 0x40-0x42 are the buggy parts.
		 */
		if (p->revision < 0x40 || p->revision > 0x42)
			goto exit;
	} else {
		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
		if (p == NULL)	/* No problem parts */
			goto exit;

		/* Check for buggy part revisions */
		if (p->revision < 0x10 || p->revision > 0x12)
			goto exit;
	}

	/*
	 * Ok we have the problem.  Now set the PCI master grant to occur
	 * every master grant.  The apparent bug is that under high PCI load
	 * (quite common in Linux of course) you can get data loss when the
	 * CPU is held off the bus for 3 bus master requests.  This happens
	 * to include the IDE controllers....
	 *
	 * VIA only apply this fix when an SB Live! is present, but under
	 * both Linux and Windows this isn't enough, and corruption has
	 * been seen without an SB Live! too, so we ignore that part of
	 * the VIA recommendation.
	 */
	pci_read_config_byte(dev, 0x76, &busarb);

	/*
	 * Set bits [5:4] of byte 0x76 to 0b01:
	 * "Master priority rotation on every PCI master grant"
	 */
	busarb &= ~(1<<5);
	busarb |= (1<<4);
	pci_write_config_byte(dev, 0x76, busarb);
	pci_info(dev, "Applying VIA southbridge workaround\n");
exit:
	pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
399
400
/* VIA Apollo VP3 needs ETBF on BT848/878 */
static void quirk_viaetbf(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_VIAETBF;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
409
/* VIA VSFX chipset: flag it so drivers limit direct PCI/PCI transfers */
static void quirk_vsfx(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_VSFX;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
418
419
420
421
422
423
/*
 * ALi Magik requires workarounds to be used by the drivers that DMA to AGP
 * space.  Latency must be set to 0xA and the Triton workaround applied too.
 */
static void quirk_alimagik(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
433
434
/* Natoma has some interesting boundary conditions with Zoran stuff at least */
static void quirk_natoma(struct pci_dev *dev)
{
	if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
		pci_pci_problems |= PCIPCI_NATOMA;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
448
449
450
451
452
/*
 * This chip can cause PCI parity errors if config register 0xA0 is read
 * while DMAs are occurring, so truncate the visible config space below it.
 */
static void quirk_citrine(struct pci_dev *dev)
{
	dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
458
459
460
461
462
/*
 * This chip can cause bus lockups if config addresses above 0x600 are read
 * or written, so limit the visible config space accordingly.
 */
static void quirk_nfp6000(struct pci_dev *dev)
{
	dev->cfg_size = 0x600;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
471
472
/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		/* Grow any sub-page memory BAR to a full page; the region
		 * is marked IORESOURCE_UNSET so it will be reallocated. */
		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
			r->end = PAGE_SIZE - 1;
			r->start = 0;
			r->flags |= IORESOURCE_UNSET;
			pci_info(dev, "expanded BAR %d to page size: %pR\n",
				 i, r);
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
490
491
492
493
494
/*
 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
 * If it's mis-sized or mis-aligned, re-allocate the region as 64M.
 */
static void quirk_s3_64M(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];

	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
		r->flags |= IORESOURCE_UNSET;	/* force reallocation */
		r->start = 0;
		r->end = 0x3ffffff;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
507
508static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
509 const char *name)
510{
511 u32 region;
512 struct pci_bus_region bus_region;
513 struct resource *res = dev->resource + pos;
514
515 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion);
516
517 if (!region)
518 return;
519
520 res->name = pci_name(dev);
521 res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
522 res->flags |=
523 (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
524 region &= ~(size - 1);
525
526
527 bus_region.start = region;
528 bus_region.end = region + size - 1;
529 pcibios_bus_to_resource(dev->bus, res, &bus_region);
530
531 pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
532 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
533}
534
535
536
537
538
539
540
541
542
543
/*
 * The AMD CS5536 ISA bridge's config header mis-describes its I/O BARs:
 * if BAR0 does not report the expected 8 bytes, rebuild BARs 0-2 with
 * their true sizes via quirk_io().
 * NOTE(review): sizes 8/256/64 are taken from this code as-is; confirm
 * against the CS5536 data book if touching this.
 */
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
	static char *name = "CS5536 ISA bridge";

	if (pci_resource_len(dev, 0) != 8) {
		quirk_io(dev, 0, 8, name);	/* SMB */
		quirk_io(dev, 1, 256, name);	/* GPIO */
		quirk_io(dev, 2, 64, name);	/* MFGPT */
		pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
			 name);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
557
558static void quirk_io_region(struct pci_dev *dev, int port,
559 unsigned size, int nr, const char *name)
560{
561 u16 region;
562 struct pci_bus_region bus_region;
563 struct resource *res = dev->resource + nr;
564
565 pci_read_config_word(dev, port, ®ion);
566 region &= ~(size - 1);
567
568 if (!region)
569 return;
570
571 res->name = pci_name(dev);
572 res->flags = IORESOURCE_IO;
573
574
575 bus_region.start = region;
576 bus_region.end = region + size - 1;
577 pcibios_bus_to_resource(dev->bus, res, &bus_region);
578
579 if (!pci_claim_resource(dev, nr))
580 pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
581}
582
583
584
585
586
/*
 * ATI Northbridge setups MCE the processor if you even read somewhere
 * between 0x3b0->0x3bb or read 0x3d3 :-/
 */
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
	pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	/* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */
	request_region(0x3b0, 0x0C, "RadeonIGP");
	request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
595
596
597
598
599
600
601
602
603
604
605
606
/*
 * The AMD NL USB controller advertises the xHCI host-controller class even
 * though it is actually a USB 3.0 *device* controller driven by dwc3.
 * Override the class so the dwc3 driver claims it instead of xhci.
 */
static void quirk_amd_nl_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Use "USB Device (not host controller)" class */
	pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
	pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
		quirk_amd_nl_class);
618
619
620
621
622
623
624
625
/*
 * Synopsys USB 3.x HAPS prototyping platforms expose a dwc3 device
 * controller but advertise the xHCI host-controller class.  For the known
 * HAPS device IDs, override the class so dwc3 claims the device.
 */
static void quirk_synopsys_haps(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	switch (pdev->device) {
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
		pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
		pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
			 class, pdev->class);
		break;
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
			       PCI_CLASS_SERIAL_USB_XHCI, 0,
			       quirk_synopsys_haps);
643
644
645
646
647
648
649
650
651
652
653
/*
 * ALI M7101: Two IO regions pointed to by words at
 *	0xE0 (64 bytes of ACPI registers)
 *	0xE2 (32 bytes of SMB registers)
 */
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
660
661static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
662{
663 u32 devres;
664 u32 mask, size, base;
665
666 pci_read_config_dword(dev, port, &devres);
667 if ((devres & enable) != enable)
668 return;
669 mask = (devres >> 16) & 15;
670 base = devres & 0xffff;
671 size = 16;
672 for (;;) {
673 unsigned bit = size >> 1;
674 if ((bit & mask) == bit)
675 break;
676 size = bit;
677 }
678
679
680
681
682
683 base &= -size;
684 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
685}
686
687static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
688{
689 u32 devres;
690 u32 mask, size, base;
691
692 pci_read_config_dword(dev, port, &devres);
693 if ((devres & enable) != enable)
694 return;
695 base = devres & 0xffff0000;
696 mask = (devres & 0x3f) << 16;
697 size = 128 << 16;
698 for (;;) {
699 unsigned bit = size >> 1;
700 if ((bit & mask) == bit)
701 break;
702 size = bit;
703 }
704
705
706
707
708
709 base &= -size;
710 pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
711}
712
713
714
715
716
717
718
/*
 * PIIX4 ACPI: Two IO regions pointed to by longwords at
 *	0x40 (64 bytes of ACPI registers)
 *	0x90 (16 bytes of SMB registers)
 * and a few strange programmable PIIX4 device resources.
 */
static void quirk_piix4_acpi(struct pci_dev *dev)
{
	u32 res_a;

	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");

	/* Device resource A has enables for some of the other ones */
	pci_read_config_dword(dev, 0x5c, &res_a);

	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);

	/*
	 * Device resource D is just bitfields for static resources;
	 * resources E and F have bits 31:28 in devres A for enables.
	 */
	if (res_a & (1 << 29)) {
		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
	}
	/* Device resource G is identical to E; H to F */
	if (res_a & (1 << 30)) {
		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
	}
	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
749
/* ICH4/ICH6 LPC bridge register offsets and enable bits */
#define ICH_PMBASE 0x40
#define ICH_ACPI_CNTL 0x44
#define ICH4_ACPI_EN 0x10
#define ICH6_ACPI_EN 0x80
#define ICH4_GPIOBASE 0x58
#define ICH4_GPIO_CNTL 0x5c
#define ICH4_GPIO_EN 0x10
#define ICH6_GPIOBASE 0x48
#define ICH6_GPIO_CNTL 0x4c
#define ICH6_GPIO_EN 0x10

/*
 * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords
 *	0x40 (128 bytes of ACPI, GPIO & TCO registers)
 *	0x58 (64 bytes of GPIO I/O space)
 * Each is claimed only when its enable bit in the control register is set.
 */
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
	u8 enable;

	/*
	 * The check for PCIBIOS_MIN_IO is to ensure we won't create a
	 * conflict with low legacy (and fixed) ports.  We don't know the
	 * decoding priority and can't tell whether the legacy device or
	 * the one created here is really at that address.  This happens
	 * on boards with broken BIOSes.
	 */
	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH4_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH4 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
	if (enable & ICH4_GPIO_EN)
		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
797
/*
 * ICH6+ variant of the ACPI/GPIO region claim: same PM base register as
 * ICH4, but different enable bits and a different GPIO base/control pair.
 */
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
	u8 enable;

	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH6_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH6 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
	if (enable & ICH6_GPIO_EN)
		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH6 GPIO");
}
812
/*
 * Report an ICH6 LPC generic I/O decode range.  Bit 0 of the register is
 * the enable; bits 15:2 hold the base.  @dynsize selects between the
 * dynamically-sized decode (approximated as 16 bytes) and the fixed
 * 128-byte decode.
 */
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name, int dynsize)
{
	u32 val;
	u32 size, base;

	pci_read_config_dword(dev, reg, &val);

	/* Filter out flags */
	if (!(val & 1))
		return;
	base = val & 0xfffc;
	if (dynsize) {
		/*
		 * This is not correct. It is 16, 32 or 64 bytes depending on
		 * register D31:F0:ADh bits 5:4.
		 *
		 * But this gets us at least _part_ of it.
		 */
		size = 16;
	} else {
		size = 128;
	}
	base &= ~(size-1);

	/*
	 * Just print it out for now. We should reserve it after more
	 * debugging.
	 */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
844
/* ICH6 LPC bridge: claim ACPI/GPIO regions and report generic decodes */
static void quirk_ich6_lpc(struct pci_dev *dev)
{
	/* Shared ACPI/GPIO decode with all ICH6+ parts */
	ich6_lpc_acpi_gpio(dev);

	/* ICH6-specific generic decodes */
	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
856
/*
 * Report an ICH7+ LPC generic I/O decode range.  Bit 0 enables the range;
 * bits 15:2 hold the base and bits 23:18 an address mask (low two mask
 * bits are always decoded).
 */
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name)
{
	u32 val;
	u32 mask, base;

	pci_read_config_dword(dev, reg, &val);

	/* Filter out flags */
	if (!(val & 1))
		return;

	/* Figure out the range */
	base = val & 0xfffc;
	mask = (val >> 16) & 0xfc;
	mask |= 3;

	/*
	 * Just print it out for now. We should reserve it after more
	 * debugging.
	 */
	pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
880
881
/* ICH7-10 has the same common LPC generic IO decode registers */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
	/* We share the common ACPI/GPIO decode with ICH6 */
	ich6_lpc_acpi_gpio(dev);

	/* And have 4 ICH7+ generic decodes */
	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
906
907
908
909
910
/*
 * VIA ACPI: One IO region pointed to by longword at
 *	0x48 (256 bytes of ACPI registers)
 * Only on revisions with the 0x10 bit set (per the code; see VIA docs).
 */
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
	if (dev->revision & 0x10)
		quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
				"vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
918
919
920
921
922
923
924
/*
 * VIA VT82C686 ACPI: Three IO regions pointed to by (long)words at
 *	0x48 (256 bytes of ACPI registers)
 *	0x70 (128 bytes of hardware monitoring register)
 *	0x90 (16 bytes of SMB registers)
 */
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
	quirk_vt82c586_acpi(dev);

	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
			"vt82c686 HW-mon");

	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
935
936
937
938
939
940
/*
 * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
 *	0x88 (128 bytes of power management registers)
 *	0xd0 (16 bytes of SMB registers)
 */
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
947
948
949
950
951
/*
 * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast
 * back-to-back: disable fast back-to-back on the secondary bus segment.
 */
static void quirk_xio2000a(struct pci_dev *dev)
{
	struct pci_dev *pdev;
	u16 command;

	pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
		pci_read_config_word(pdev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_FAST_BACK)
			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
			quirk_xio2000a);
966
967#ifdef CONFIG_X86_IO_APIC
968
969#include <asm/io_apic.h>
970
971
972
973
974
975
976
977
/*
 * VIA 686A/B: The PCI_INTERRUPT_LINE register points to the on-chip APIC
 * routing; when an IO-APIC is in use, route all on-chip devices to the
 * external APIC (0x1f = all known bits 4-0), otherwise disable external
 * routing entirely.
 */
static void quirk_via_ioapic(struct pci_dev *dev)
{
	u8 tmp;

	if (nr_ioapics < 1)
		tmp = 0;	/* nothing routed to external APIC */
	else
		tmp = 0x1f;	/* all known bits (4-0) routed to external APIC */

	pci_info(dev, "%sbling VIA external APIC routing\n",
		 tmp == 0 ? "Disa" : "Ena");

	/* Offset 0x58: External APIC IRQ output control */
	pci_write_config_byte(dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
995
996
997
998
999
1000
1001
/*
 * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' bit.
 * This leads to doubled level interrupt rates.
 * Set this bit to get rid of cycle wastage.  Otherwise uncritical.
 */
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
	u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8

	pci_read_config_byte(dev, 0x5B, &misc_control2);
	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
		pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
/*
 * The AMD IO-APIC can hang the box when an APIC IRQ is masked (AMD
 * Erratum #22 on revisions >= 0x02).  We can't fix it in the kernel, so
 * warn the user to try booting with "noapic" if instability occurs.
 */
static void quirk_amd_ioapic(struct pci_dev *dev)
{
	if (dev->revision >= 0x02) {
		pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
		pci_warn(dev, "        : booting with the \"noapic\" option\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
1033#endif
1034
1035#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
1036
/*
 * Cavium ThunderX: for the RNM function (subsystem 0xa118) the SR-IOV
 * "link" value must point at the device's own devfn.
 * NOTE(review): assumes dev->sriov is valid at fixup-final time for this
 * device — confirm if touching this.
 */
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
	/* RNM device is identified by its subsystem device ID */
	if (dev->subsystem_device == 0xa118)
		dev->sriov->link = dev->devfn;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
1044#endif
1045
1046
1047
1048
1049
/*
 * AMD8131 rev <= 0x12: the PCI-X bridge mishandles large maximum memory
 * read byte counts; disable MMRBC changes on the secondary bus.
 */
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
	if (dev->subordinate && dev->revision <= 0x12) {
		pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
			 dev->revision);
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
1059
1060
1061
1062
1063
1064
1065
1066
/*
 * FIXME: it is questionable that quirk_via_acpi() is needed.  It shows up
 * as an ISA bridge, and does not support the PCI_INTERRUPT_LINE register
 * at all.  Therefore it seems like setting the pci_dev's IRQ to the value
 * of the ACPI SCI interrupt is only done for convenience.
 */
static void quirk_via_acpi(struct pci_dev *d)
{
	u8 irq;

	/* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
	pci_read_config_byte(d, 0x42, &irq);
	irq &= 0xf;
	if (irq && (irq != 2))
		d->irq = irq;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
1079
1080
/* Slot range (on bus 0) of on-board VIA VLink devices; -1 = no VLink found */
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;

/*
 * Record which device slots on bus 0 belong to the VIA southbridge's
 * internal VLink devices, so quirk_via_vlink() below only touches those.
 */
static void quirk_via_bridge(struct pci_dev *dev)
{
	/* See what bridge we have and find the device ranges */
	switch (dev->device) {
	case PCI_DEVICE_ID_VIA_82C686:
		/*
		 * The VT82C686 is special; it attaches to PCI and can be
		 * found in several positions, so limit the VLink fixup
		 * range to the slot the bridge itself occupies.
		 */
		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
		break;
	case PCI_DEVICE_ID_VIA_8237:
	case PCI_DEVICE_ID_VIA_8237A:
		via_vlink_dev_lo = 15;
		break;
	case PCI_DEVICE_ID_VIA_8235:
		via_vlink_dev_lo = 16;
		break;
	case PCI_DEVICE_ID_VIA_8231:
	case PCI_DEVICE_ID_VIA_8233_0:
	case PCI_DEVICE_ID_VIA_8233A:
	case PCI_DEVICE_ID_VIA_8233C_0:
		via_vlink_dev_lo = 17;
		break;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
/*
 * quirk_via_vlink - VIA VLink IRQ number update
 * @dev: PCI device
 *
 * If the device we are dealing with is on a PIC IRQ we need to make sure
 * that the IRQ line register, which usually is not relevant for PCI cards,
 * is actually written so that interrupts get sent to the right place.
 *
 * We only do this on systems where a VIA southbridge was detected, and
 * only for VIA devices on the motherboard (see quirk_via_bridge above).
 */
static void quirk_via_vlink(struct pci_dev *dev)
{
	u8 irq, new_irq;

	/* Check if we have VLink at all */
	if (via_vlink_dev_lo == -1)
		return;

	new_irq = dev->irq;

	/* Don't quirk interrupts outside the legacy IRQ range */
	if (!new_irq || new_irq > 15)
		return;

	/* Internal device?  Only slots via_vlink_dev_lo..hi on bus 0 */
	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
		return;

	/*
	 * This is an internal VLink device on a PIC interrupt.  The BIOS
	 * ought to have set this but may not have, so we redo it.
	 */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	if (new_irq != irq) {
		pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
			 irq, new_irq);
		udelay(15);	/* unknown if delay really needed */
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
1163
1164
1165
1166
1167
1168
/*
 * VIA VT82C598 has its device ID settable and many BIOSes set it to the ID
 * of VT82C597 for backward compatibility.  We need to switch it off to be
 * able to recognize the real type of the chip.
 */
static void quirk_vt82c598_id(struct pci_dev *dev)
{
	pci_write_config_byte(dev, 0xfc, 0);
	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
1175
1176
1177
1178
1179
1180
1181
/*
 * CardBus controllers have a legacy base address that enables them to
 * respond as i82365 pcmcia controllers.  We don't want them to do this
 * even if the Linux CardBus driver is not loaded, because the Linux i82365
 * driver does not (and should not) handle CardBus.
 */
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
1190
1191
1192
1193
1194
1195
1196
1197
1198static void quirk_amd_ordering(struct pci_dev *dev)
1199{
1200 u32 pcic;
1201 pci_read_config_dword(dev, 0x4C, &pcic);
1202 if ((pcic & 6) != 6) {
1203 pcic |= 6;
1204 pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
1205 pci_write_config_dword(dev, 0x4C, pcic);
1206 pci_read_config_dword(dev, 0x84, &pcic);
1207 pcic |= (1 << 23);
1208 pci_write_config_dword(dev, 0x84, pcic);
1209 }
1210}
1211DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1212DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1213
1214
1215
1216
1217
1218
1219
1220
1221static void quirk_dunord(struct pci_dev *dev)
1222{
1223 struct resource *r = &dev->resource[1];
1224
1225 r->flags |= IORESOURCE_UNSET;
1226 r->start = 0;
1227 r->end = 0xffffff;
1228}
1229DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1230
1231
1232
1233
1234
1235
/*
 * These bridges forward (subtractively decode) I/O and memory cycles for
 * ranges they do not claim; mark them transparent so resource allocation
 * treats them accordingly.
 */
static void quirk_transparent_bridge(struct pci_dev *dev)
{
 dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1242
1243
1244
1245
1246
1247
1248
1249static void quirk_mediagx_master(struct pci_dev *dev)
1250{
1251 u8 reg;
1252
1253 pci_read_config_byte(dev, 0x41, ®);
1254 if (reg & 2) {
1255 reg &= ~2;
1256 pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1257 reg);
1258 pci_write_config_byte(dev, 0x41, reg);
1259 }
1260}
1261DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1262DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1263
1264
1265
1266
1267
1268
1269static void quirk_disable_pxb(struct pci_dev *pdev)
1270{
1271 u16 config;
1272
1273 if (pdev->revision != 0x04)
1274 return;
1275 pci_read_config_word(pdev, 0x40, &config);
1276 if (config & (1<<6)) {
1277 config &= ~(1<<6);
1278 pci_write_config_word(pdev, 0x40, config);
1279 pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
1280 }
1281}
1282DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1283DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1284
/*
 * Switch AMD/ATI SATA controllers that boot in IDE mode (subclass 0x01)
 * over to AHCI.  Setting bit 0 of config byte 0x40 unlocks the class
 * code registers; offset 0x9 (prog-if) is set to 1 and offset 0xa
 * (subclass) to 6, i.e. class 0x0106 = SATA/AHCI, then the lock bit is
 * restored.  Re-applied on early resume.
 */
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{

 u8 tmp;

 /* Only act if the device currently reports subclass IDE (0x01) */
 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
 if (tmp == 0x01) {
 pci_read_config_byte(pdev, 0x40, &tmp);
 pci_write_config_byte(pdev, 0x40, tmp|1);
 pci_write_config_byte(pdev, 0x9, 1);
 pci_write_config_byte(pdev, 0xa, 6);
 pci_write_config_byte(pdev, 0x40, tmp);

 /* Update the cached class so the AHCI driver binds */
 pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
 pci_info(pdev, "set SATA to AHCI mode\n");
 }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1310
1311
1312static void quirk_svwks_csb5ide(struct pci_dev *pdev)
1313{
1314 u8 prog;
1315 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1316 if (prog & 5) {
1317 prog &= ~5;
1318 pdev->class &= ~5;
1319 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1320
1321 }
1322}
1323DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1324
1325
1326static void quirk_ide_samemode(struct pci_dev *pdev)
1327{
1328 u8 prog;
1329
1330 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1331
1332 if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
1333 pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
1334 prog &= ~5;
1335 pdev->class &= ~5;
1336 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1337 }
1338}
1339DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1340
1341
/*
 * Some ATA controllers misbehave if put into D3; flag them so the PM
 * core never places them in that state.  Applied by class match to the
 * IDE storage class for the vendors listed below.
 */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}

DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);

DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);

DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1358
1359
1360
1361
1362
/*
 * The Intel 82375 reports a wrong class code; override the cached class
 * so the kernel treats it as the EISA bridge it actually is.
 */
static void quirk_eisa_bridge(struct pci_dev *dev)
{
 dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
/* Set when the host bridge's subsystem IDs match a board whose BIOS
 * hides the i801 SMBus controller; consumed by the asus_hides_smbus_lpc*
 * fixups further down. */
static int asus_hides_smbus;

/*
 * Detect boards (mostly ASUS, plus some HP/Samsung/Compaq models) whose
 * BIOS hides the SMBus device.  Matching is done on the host bridge:
 * subsystem vendor + bridge device ID + subsystem device ID.  On a match
 * the flag above is set; the actual un-hiding happens in the LPC-bridge
 * fixups.  The case labels without break are intentional: every listed
 * subsystem ID falls through to the same assignment.
 */
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
 if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
 if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
 switch (dev->subsystem_device) {
 case 0x8025:
 case 0x8070:
 case 0x8088:
 case 0x1626:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
 switch (dev->subsystem_device) {
 case 0x80b1:
 case 0x80b2:
 case 0x8093:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
 switch (dev->subsystem_device) {
 case 0x8030:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
 switch (dev->subsystem_device) {
 case 0x8070:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
 switch (dev->subsystem_device) {
 case 0x80c9:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
 switch (dev->subsystem_device) {
 case 0x1751:
 case 0x1821:
 case 0x1897:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
 switch (dev->subsystem_device) {
 case 0x184b:
 case 0x186a:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
 switch (dev->subsystem_device) {
 case 0x80f2:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
 switch (dev->subsystem_device) {
 case 0x1882:
 case 0x1977:
 asus_hides_smbus = 1;
 }
 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
 switch (dev->subsystem_device) {
 case 0x088C:
 case 0x0890:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
 switch (dev->subsystem_device) {
 case 0x12bc:
 case 0x12bd:
 case 0x006a:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
 switch (dev->subsystem_device) {
 case 0x12bf:
 asus_hides_smbus = 1;
 }
 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
 switch (dev->subsystem_device) {
 case 0xC00C:
 asus_hides_smbus = 1;
 }
 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
 switch (dev->subsystem_device) {
 case 0x0058:
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
 switch (dev->subsystem_device) {
 case 0xB16C:
 /* NOTE(review): specific board model unknown here -
 * ID taken from field reports; do not extend without
 * a matching report. */
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
 switch (dev->subsystem_device) {
 case 0x00b8:
 case 0x00b9:
 case 0x00ba:
 /* NOTE(review): three related Compaq subsystem IDs;
 * presumably variants of the same platform - confirm
 * before pruning. */
 asus_hides_smbus = 1;
 }
 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
 switch (dev->subsystem_device) {
 case 0x001A:
 asus_hides_smbus = 1;
 }
 }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1528
1529static void asus_hides_smbus_lpc(struct pci_dev *dev)
1530{
1531 u16 val;
1532
1533 if (likely(!asus_hides_smbus))
1534 return;
1535
1536 pci_read_config_word(dev, 0xF2, &val);
1537 if (val & 0x8) {
1538 pci_write_config_word(dev, 0xF2, val & (~0x8));
1539 pci_read_config_word(dev, 0xF2, &val);
1540 if (val & 0x8)
1541 pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1542 val);
1543 else
1544 pci_info(dev, "Enabled i801 SMBus device\n");
1545 }
1546}
1547DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1548DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1549DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1550DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1551DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1552DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1553DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1554DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1555DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1556DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1557DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1558DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1559DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1560DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1561
1562
1563static void __iomem *asus_rcba_base;
1564static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
1565{
1566 u32 rcba;
1567
1568 if (likely(!asus_hides_smbus))
1569 return;
1570 WARN_ON(asus_rcba_base);
1571
1572 pci_read_config_dword(dev, 0xF0, &rcba);
1573
1574 asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
1575 if (asus_rcba_base == NULL)
1576 return;
1577}
1578
/*
 * Very early in resume, clear bit 3 of the 32-bit register at RCBA
 * offset 0x3418 (presumably the ICH6 Function Disable register - the
 * mask 0xFFFFFFF7 clears only that bit; confirm against the ICH6
 * datasheet) to un-hide the SMBus device before config space restore.
 * Requires the mapping created by the suspend hook above.
 */
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
 u32 val;

 if (likely(!asus_hides_smbus || !asus_rcba_base))
 return;

 /* read the Function Disable register, dword mode only */
 val = readl(asus_rcba_base + 0x3418);

 /* enable the SMBus device by clearing bit 3 */
 writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
}
1592
1593static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1594{
1595 if (likely(!asus_hides_smbus || !asus_rcba_base))
1596 return;
1597
1598 iounmap(asus_rcba_base);
1599 asus_rcba_base = NULL;
1600 pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
1601}
1602
/*
 * Boot-time variant: run the full suspend -> early-resume -> resume
 * sequence once so the SMBus device is un-hidden at initial enumeration
 * as well, not only across a suspend/resume cycle.
 */
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
 asus_hides_smbus_lpc_ich6_suspend(dev);
 asus_hides_smbus_lpc_ich6_resume_early(dev);
 asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1613
1614
1615static void quirk_sis_96x_smbus(struct pci_dev *dev)
1616{
1617 u8 val = 0;
1618 pci_read_config_byte(dev, 0x77, &val);
1619 if (val & 0x10) {
1620 pci_info(dev, "Enabling SiS 96x SMBus\n");
1621 pci_write_config_byte(dev, 0x77, val & ~0x10);
1622 }
1623}
1624DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1625DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1626DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1627DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1628DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1629DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1630DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1631DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641#define SIS_DETECT_REGISTER 0x40
1642
1643static void quirk_sis_503(struct pci_dev *dev)
1644{
1645 u8 reg;
1646 u16 devid;
1647
1648 pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®);
1649 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1650 pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1651 if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
1652 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1653 return;
1654 }
1655
1656
1657
1658
1659
1660
1661 dev->device = devid;
1662 quirk_sis_96x_smbus(dev);
1663}
1664DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1665DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1666
1667
1668
1669
1670
1671
1672
1673static void asus_hides_ac97_lpc(struct pci_dev *dev)
1674{
1675 u8 val;
1676 int asus_hides_ac97 = 0;
1677
1678 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1679 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1680 asus_hides_ac97 = 1;
1681 }
1682
1683 if (!asus_hides_ac97)
1684 return;
1685
1686 pci_read_config_byte(dev, 0x50, &val);
1687 if (val & 0xc0) {
1688 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1689 pci_read_config_byte(dev, 0x50, &val);
1690 if (val & 0xc0)
1691 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1692 val);
1693 else
1694 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1695 }
1696}
1697DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1698DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1699
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)

/*
 * JMicron JMB36x/JMB369 controllers expose their SATA/PATA engines via
 * function mappings programmed through config dwords 0x40 and 0x80.
 * Select a per-device-ID mapping, then re-read the header type and class
 * code, since the rewrite changes what the chip reports.
 *
 * NOTE(review): the individual bit meanings in 0x40/0x80 are not spelled
 * out here - the masks are vendor-derived magic; confirm against the
 * JMicron datasheet before changing any of them.
 */
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
 u32 conf1, conf5, class;
 u8 hdr;

 /* The chip-wide mapping registers live on function 0 only */
 if (PCI_FUNC(pdev->devfn))
 return;

 pci_read_config_dword(pdev, 0x40, &conf1);
 pci_read_config_dword(pdev, 0x80, &conf5);

 /* Clear all the bits we are about to program */
 conf1 &= ~0x00CFF302;
 conf5 &= ~(1 << 24);

 switch (pdev->device) {
 case PCI_DEVICE_ID_JMICRON_JMB360:
 case PCI_DEVICE_ID_JMICRON_JMB362:
 case PCI_DEVICE_ID_JMICRON_JMB364:
 /* single-function mapping for these parts */
 conf1 |= 0x0002A100;
 break;

 case PCI_DEVICE_ID_JMICRON_JMB365:
 case PCI_DEVICE_ID_JMICRON_JMB366:
 /* extra bit in conf5, then share the dual-function setup below */
 conf5 |= (1 << 24);
 fallthrough;
 case PCI_DEVICE_ID_JMICRON_JMB361:
 case PCI_DEVICE_ID_JMICRON_JMB363:
 case PCI_DEVICE_ID_JMICRON_JMB369:
 /* dual-function mapping */
 conf1 |= 0x00C2A1B3;
 break;

 case PCI_DEVICE_ID_JMICRON_JMB368:
 /* PATA-only mapping */
 conf1 |= 0x00C00000;
 break;
 }

 pci_write_config_dword(pdev, 0x40, conf1);
 pci_write_config_dword(pdev, 0x80, conf5);

 /* The mapping change can alter the reported header type... */
 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
 pdev->hdr_type = hdr & 0x7f;
 pdev->multifunction = !!(hdr & 0x80);

 /* ...and the class code; refresh the cached copies */
 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
 pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);

#endif
1779
1780static void quirk_jmicron_async_suspend(struct pci_dev *dev)
1781{
1782 if (dev->multifunction) {
1783 device_disable_async_suspend(&dev->dev);
1784 pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
1785 }
1786}
1787DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
1788DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
1789DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
1790DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
1791
#ifdef CONFIG_X86_IO_APIC
/*
 * Intel EESSC (Alder) IOAPIC: BAR 0 is the only real resource.  Reserve
 * it directly in the iomem tree and wipe the remaining BARs so the PCI
 * core never tries to move or assign them.
 */
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
 int i;

 /* Only the variant with the 0xff00 class prefix carries the IOAPIC */
 if ((pdev->class >> 8) != 0xff00)
 return;

 /* Pin BAR 0 into the resource tree so nothing relocates the IOAPIC */
 if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
 insert_resource(&iomem_resource, &pdev->resource[0]);

 /* The remaining BARs are bogus on this device: clear them all */
 for (i = 1; i < PCI_STD_NUM_BARS; i++)
 memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
1817
/*
 * MSI does not work reliably behind these chips; disable it for the
 * device so drivers fall back to line-based interrupts.
 */
static void quirk_pcie_mch(struct pci_dev *pdev)
{
 pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);

DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1827
1828
1829
1830
1831
/*
 * The Intel PXH bridges' internal SHPC device cannot use MSI; disable
 * MSI for the bridge and warn so the restriction is visible in logs.
 */
static void quirk_pcie_pxh(struct pci_dev *dev)
{
 dev->no_msi = 1;
 pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
1842
1843
1844
1845
1846
/*
 * These Intel PCIe root ports need extra time after D3 and must not be
 * placed in D1/D2.  Note the first assignment changes the file-global
 * pci_pm_d3_delay (affects ALL devices), not a per-device field.
 */
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
 pci_pm_d3_delay = 120;
 dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1873
1874static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
1875{
1876 if (dev->d3_delay >= delay)
1877 return;
1878
1879 dev->d3_delay = delay;
1880 pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
1881 dev->d3_delay);
1882}
1883
/*
 * The Apple-branded (subsystem 0x00e2) variant of this Radeon needs a
 * longer post-D3hot delay; other subsystems of the same chip do not.
 */
static void quirk_radeon_pm(struct pci_dev *dev)
{
 if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
 dev->subsystem_device == 0x00e2)
 quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
/*
 * AMD Ryzen xHCI controllers (0x15e0/0x15e1) need a longer delay after
 * leaving D3hot before they respond to config accesses.
 */
static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
{
 quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
1907
#ifdef CONFIG_X86_IO_APIC
/* DMI callback: set the global noioapicreroute flag for boards where
 * rerouting boot interrupts is known to break. Always returns 0 so
 * dmi_check_system() keeps scanning the table. */
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
 noioapicreroute = 1;
 pr_info("%s detected: disable boot interrupt reroute\n", d->ident);

 return 0;
}
1916
/*
 * Boards on which boot-interrupt rerouting must be disabled.
 * Scanned by quirk_reroute_to_boot_interrupts_intel() below.
 */
static const struct dmi_system_id boot_interrupt_dmi_table[] = {
 {
 .callback = dmi_disable_ioapicreroute,
 .ident = "ASUSTek Computer INC. M2N-LR",
 .matches = {
 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
 DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
 },
 },
 {}
};
1931
1932
1933
1934
1935
1936
1937
/*
 * Mark these Intel bridges so interrupts are rerouted to the boot
 * interrupt line (irq_reroute_variant consumed by the x86 IRQ code).
 * Skipped entirely if disabled on the command line (noioapicquirk /
 * noioapicreroute) or by a DMI match in the table above.
 */
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
 dmi_check_system(boot_interrupt_dmi_table);
 if (noioapicquirk || noioapicreroute)
 return;

 dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
 pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
#define INTEL_6300_IOAPIC_ABAR 0x40
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)

#define INTEL_CIPINTRC_CFG_OFFSET 0x14C
#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)

/*
 * Disable legacy boot interrupts on Intel parts that generate them.
 * Two register layouts exist: the 6300ESB uses a bit in the IOAPIC
 * ABAR word, newer Xeon uncore devices use a bit in the CIPINTRC dword.
 * Respects the noioapicquirk command-line override.
 */
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
 u16 pci_config_word;
 u32 pci_config_dword;

 if (noioapicquirk)
 return;

 switch (dev->device) {
 case PCI_DEVICE_ID_INTEL_ESB_10:
 /* 6300ESB: set the disable bit in the IOAPIC ABAR word */
 pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
 &pci_config_word);
 pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
 pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
 pci_config_word);
 break;
 case 0x3c28:
 case 0x0e28:
 case 0x2f28:
 case 0x6f28:
 case 0x2034:
 /* Xeon uncore devices: set the INTX-to-ICH disable bit */
 pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
 &pci_config_dword);
 pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
 pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
 pci_config_dword);
 break;
 default:
 return;
 }
 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
 dev->vendor, dev->device);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
 quirk_disable_intel_boot_interrupt);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
 quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
 quirk_disable_intel_boot_interrupt);
2059
2060
#define BC_HT1000_FEATURE_REG 0x64
#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
#define BC_HT1000_MAP_IDX 0xC00
#define BC_HT1000_MAP_DATA 0xC01

/*
 * Disable boot interrupts on the ServerWorks/Broadcom HT1000: expose
 * the PIC mapping registers via the feature register, zero all 32 IRQ
 * map entries through the I/O-port index/data pair, then restore the
 * feature register to its original value.
 */
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
 u32 pci_config_dword;
 u8 irq;

 if (noioapicquirk)
 return;

 /* Unlock the PIC mapping registers */
 pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
 BC_HT1000_PIC_REGS_ENABLE);

 /* Clear every IRQ map entry via the index/data port pair */
 for (irq = 0x10; irq < 0x10 + 32; irq++) {
 outb(irq, BC_HT1000_MAP_IDX);
 outb(0x00, BC_HT1000_MAP_DATA);
 }

 /* Re-lock: restore the feature register's original value */
 pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);

 pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
2090
2091
2092
2093
2094
2095
2096
2097
#define AMD_813X_MISC			0x40
#define AMD_813X_NOIOAMODE		(1<<0)
#define AMD_813X_REV_B1			0x12
#define AMD_813X_REV_B2			0x13

/*
 * Disable boot interrupts on AMD 8131/8132 bridges by clearing the
 * NOIOAMODE bit in the misc config register.  Revisions B1 and B2 are
 * explicitly skipped; also skipped when "noioapicquirk" is set.
 */
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;

	if (noioapicquirk)
		return;
	if ((dev->revision == AMD_813X_REV_B1) ||
	    (dev->revision == AMD_813X_REV_B2))
		return;

	/* Read-modify-write: clear only the NOIOAMODE bit. */
	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
	pci_config_dword &= ~AMD_813X_NOIOAMODE;
	pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2124
#define AMD_8111_PCI_IRQ_ROUTING	0x56

/*
 * Disable boot interrupts on the AMD 8111 SMBus function by zeroing
 * its PCI IRQ routing register.  If the register already reads zero,
 * just report that and leave the hardware untouched.
 */
static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;

	if (noioapicquirk)
		return;

	pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
	if (!pci_config_word) {
		/* Already disabled; nothing to write. */
		pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
			 dev->vendor, dev->device);
		return;
	}
	pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2146#endif
2147
2148
2149
2150
2151
2152
2153static void quirk_tc86c001_ide(struct pci_dev *dev)
2154{
2155 struct resource *r = &dev->resource[0];
2156
2157 if (r->start & 0x8) {
2158 r->flags |= IORESOURCE_UNSET;
2159 r->start = 0;
2160 r->end = 0xf;
2161 }
2162}
2163DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
2164 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
2165 quirk_tc86c001_ide);
2166
2167
2168
2169
2170
2171
2172
2173
2174static void quirk_plx_pci9050(struct pci_dev *dev)
2175{
2176 unsigned int bar;
2177
2178
2179 if (dev->revision >= 2)
2180 return;
2181 for (bar = 0; bar <= 1; bar++)
2182 if (pci_resource_len(dev, bar) == 0x80 &&
2183 (pci_resource_start(dev, bar) & 0x80)) {
2184 struct resource *r = &dev->resource[bar];
2185 pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
2186 bar);
2187 r->flags |= IORESOURCE_UNSET;
2188 r->start = 0;
2189 r->end = 0xff;
2190 }
2191}
2192DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2193 quirk_plx_pci9050);
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
2204DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
2205
/*
 * Netmos multi-port chips encode the port mix in the subsystem device
 * ID: bits 7:4 = number of parallel ports, bits 3:0 = number of serial
 * ports.  When any parallel port is present, switch the class from
 * SERIAL to OTHER so the parport_serial driver binds instead.
 */
static void quirk_netmos(struct pci_dev *dev)
{
	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
	unsigned int num_serial = dev->subsystem_device & 0xf;

	switch (dev->device) {
	case PCI_DEVICE_ID_NETMOS_9835:
		/* IBM 0299 subsystem: leave the class alone. */
		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
		    dev->subsystem_device == 0x0299)
			return;
		fallthrough;
	case PCI_DEVICE_ID_NETMOS_9735:
	case PCI_DEVICE_ID_NETMOS_9745:
	case PCI_DEVICE_ID_NETMOS_9845:
	case PCI_DEVICE_ID_NETMOS_9855:
		if (num_parallel) {
			pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
				 dev->device, num_parallel, num_serial);
			/* Keep the low byte (programming interface). */
			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
			    (dev->class & 0xff);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
			       PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
2242
/*
 * Some e100 parts reach the OS with their interrupt still enabled by
 * firmware before any driver is loaded; mask it in the device's CSR
 * space so it stays quiet until a driver takes over.
 */
static void quirk_e100_interrupt(struct pci_dev *dev)
{
	u16 command, pmcsr;
	u8 __iomem *csr;
	u8 cmd_hi;

	switch (dev->device) {
	/* Known e100 device IDs */
	case 0x1029:
	case 0x1030 ... 0x1034:
	case 0x1038 ... 0x103E:
	case 0x1050 ... 0x1057:
	case 0x1059:
	case 0x1064 ... 0x106B:
	case 0x1091 ... 0x1095:
	case 0x1209:
	case 0x1229:
	case 0x2449:
	case 0x2459:
	case 0x245D:
	case 0x27DC:
		break;
	default:
		return;
	}

	/*
	 * Only proceed if BAR 0 is assigned and memory decoding is on;
	 * otherwise the CSR registers cannot be reached safely.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &command);

	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
		return;

	/*
	 * Skip devices not in D0 -- accessing CSR space of a device in
	 * a low-power state is avoided.
	 */
	if (dev->pm_cap) {
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
			return;
	}

	/* Map only the first 8 bytes of CSR space; cmd_hi is at +3. */
	csr = ioremap(pci_resource_start(dev, 0), 8);
	if (!csr) {
		pci_warn(dev, "Can't map e100 registers\n");
		return;
	}

	cmd_hi = readb(csr + 3);
	if (cmd_hi == 0) {
		/* Zero means unmasked; write 1 to mask the interrupt. */
		pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
		writeb(1, csr + 3);
	}

	iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
2308
2309
2310
2311
2312
2313static void quirk_disable_aspm_l0s(struct pci_dev *dev)
2314{
2315 pci_info(dev, "Disabling L0s\n");
2316 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
2317}
2318DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
2319DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
2320DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
2321DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
2322DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
2323DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
2324DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
2325DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
2326DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
2327DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
2328DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
2329DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
2330DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
2331DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2332
2333static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
2334{
2335 pci_info(dev, "Disabling ASPM L0s/L1\n");
2336 pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
2337}
2338
2339
2340
2341
2342
2343
2344DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
2355{
2356 dev->clear_retrain_link = 1;
2357 pci_info(dev, "Enable PCIe Retrain Link quirk\n");
2358}
2359DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
2360DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
2361DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
2362
2363static void fixup_rev1_53c810(struct pci_dev *dev)
2364{
2365 u32 class = dev->class;
2366
2367
2368
2369
2370
2371 if (class)
2372 return;
2373
2374 dev->class = PCI_CLASS_STORAGE_SCSI << 8;
2375 pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
2376 class, dev->class);
2377}
2378DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
2379
2380
2381static void quirk_p64h2_1k_io(struct pci_dev *dev)
2382{
2383 u16 en1k;
2384
2385 pci_read_config_word(dev, 0x40, &en1k);
2386
2387 if (en1k & 0x200) {
2388 pci_info(dev, "Enable I/O Space to 1KB granularity\n");
2389 dev->io_window_1k = 1;
2390 }
2391}
2392DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2393
2394
2395
2396
2397
2398
/*
 * nVidia CK804 PCIe port: setting bit 5 of config byte 0xf41 links the
 * AER extended capability into the capability chain (per the log
 * message below).  Skip the write when the bit is already set, and
 * ignore devices whose config read fails.
 */
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
	uint8_t b;

	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
		if (!(b & 0x20)) {
			pci_write_config_byte(dev, 0xf41, b | 0x20);
			pci_info(dev, "Linking AER extended capability\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			       quirk_nvidia_ck804_pcie_aer_ext_cap);
2414
2415static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
2416{
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428 struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
2429 PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
2430 uint8_t b;
2431
2432
2433
2434
2435
2436 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
2437 if (!p)
2438 return;
2439 pci_dev_put(p);
2440
2441 if (pci_read_config_byte(dev, 0x76, &b) == 0) {
2442 if (b & 0x40) {
2443
2444 pci_write_config_byte(dev, 0x76, b ^ 0x40);
2445
2446 pci_info(dev, "Disabling VIA CX700 PCI parking\n");
2447 }
2448 }
2449
2450 if (pci_read_config_byte(dev, 0x72, &b) == 0) {
2451 if (b != 0) {
2452
2453 pci_write_config_byte(dev, 0x72, 0x0);
2454
2455
2456 pci_write_config_byte(dev, 0x75, 0x1);
2457
2458
2459 pci_write_config_byte(dev, 0x77, 0x0);
2460
2461 pci_info(dev, "Disabling VIA CX700 PCI caching\n");
2462 }
2463 }
2464}
2465DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2466
2467static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2468{
2469 u32 rev;
2470
2471 pci_read_config_dword(dev, 0xf4, &rev);
2472
2473
2474 if (rev == 0x05719000) {
2475 int readrq = pcie_get_readrq(dev);
2476 if (readrq > 2048)
2477 pcie_set_readrq(dev, 2048);
2478 }
2479}
2480DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2481 PCI_DEVICE_ID_TIGON3_5719,
2482 quirk_brcm_5719_limit_mrrs);
2483
2484
2485
2486
2487
2488
2489
2490static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2491{
2492 u8 reg;
2493
2494 if (pci_read_config_byte(dev, 0xF4, ®) == 0 && !(reg & 0x02)) {
2495 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2496 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2497 }
2498}
2499DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2500 quirk_unhide_mch_dev6);
2501DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2502 quirk_unhide_mch_dev6);
2503
2504#ifdef CONFIG_PCI_MSI
2505
2506
2507
2508
2509
2510
2511
2512static void quirk_disable_all_msi(struct pci_dev *dev)
2513{
2514 pci_no_msi();
2515 pci_warn(dev, "MSI quirk detected; MSI disabled\n");
2516}
2517DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
2518DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
2519DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
2520DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
2521DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
2522DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
2523DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
2524DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
2525
2526
2527static void quirk_disable_msi(struct pci_dev *dev)
2528{
2529 if (dev->subordinate) {
2530 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2531 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2532 }
2533}
2534DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2535DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2536DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2537
2538
2539
2540
2541
2542
2543
2544static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
2545{
2546 struct pci_dev *apc_bridge;
2547
2548 apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
2549 if (apc_bridge) {
2550 if (apc_bridge->device == 0x9602)
2551 quirk_disable_msi(apc_bridge);
2552 pci_dev_put(apc_bridge);
2553 }
2554}
2555DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
2556DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2557
2558
2559
2560
2561
/*
 * Return 1 if @dev has an HT MSI mapping capability whose enable bit
 * is set, 0 otherwise.  The result is decided by the first mapping
 * capability whose flags byte can be read; the walk is TTL-bounded to
 * guard against malformed capability lists.
 */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Found %s HT MSI Mapping\n",
				 flags & HT_MSI_FLAGS_ENABLE ?
				 "enabled" : "disabled");
			return (flags & HT_MSI_FLAGS_ENABLE) != 0;
		}

		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return 0;
}
2583
2584
2585static void quirk_msi_ht_cap(struct pci_dev *dev)
2586{
2587 if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
2588 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2589 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2590 }
2591}
2592DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
2593 quirk_msi_ht_cap);
2594
2595
2596
2597
2598
/*
 * nVidia CK804: the HT MSI mapping may live either on the bridge
 * itself or on the companion function at slot 0 of the same bus.  Only
 * disable MSI on the subordinate bus when neither has it enabled.
 */
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
	struct pci_dev *pdev;

	if (!dev->subordinate)
		return;

	/* Look up the companion device at slot 0 on the same bus. */
	pdev = pci_get_slot(dev->bus, 0);
	if (!pdev)
		return;
	if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
		pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
	}
	pci_dev_put(pdev);	/* drop pci_get_slot() reference */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_msi_ht_cap);
2621
2622
/*
 * Set the enable bit in every readable HT MSI mapping capability of
 * @dev.  The TTL bound protects against broken capability chains.
 */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Enabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags | HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
			 ht_enable_msi_mapping);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
			 ht_enable_msi_mapping);
2647
2648
2649
2650
2651
2652
2653static void nvenet_msi_disable(struct pci_dev *dev)
2654{
2655 const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
2656
2657 if (board_name &&
2658 (strstr(board_name, "P5N32-SLI PREMIUM") ||
2659 strstr(board_name, "P5N32-E SLI"))) {
2660 pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
2661 dev->no_msi = 1;
2662 }
2663}
2664DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2665 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2666 nvenet_msi_disable);
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
2678{
2679 dev->no_msi = 1;
2680}
2681DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0,
2682 PCI_CLASS_BRIDGE_PCI, 8,
2683 pci_quirk_nvidia_tegra_disable_rp_msi);
2684DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1,
2685 PCI_CLASS_BRIDGE_PCI, 8,
2686 pci_quirk_nvidia_tegra_disable_rp_msi);
2687DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2,
2688 PCI_CLASS_BRIDGE_PCI, 8,
2689 pci_quirk_nvidia_tegra_disable_rp_msi);
2690DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0,
2691 PCI_CLASS_BRIDGE_PCI, 8,
2692 pci_quirk_nvidia_tegra_disable_rp_msi);
2693DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1,
2694 PCI_CLASS_BRIDGE_PCI, 8,
2695 pci_quirk_nvidia_tegra_disable_rp_msi);
2696DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c,
2697 PCI_CLASS_BRIDGE_PCI, 8,
2698 pci_quirk_nvidia_tegra_disable_rp_msi);
2699DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d,
2700 PCI_CLASS_BRIDGE_PCI, 8,
2701 pci_quirk_nvidia_tegra_disable_rp_msi);
2702DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12,
2703 PCI_CLASS_BRIDGE_PCI, 8,
2704 pci_quirk_nvidia_tegra_disable_rp_msi);
2705DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13,
2706 PCI_CLASS_BRIDGE_PCI, 8,
2707 pci_quirk_nvidia_tegra_disable_rp_msi);
2708DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae,
2709 PCI_CLASS_BRIDGE_PCI, 8,
2710 pci_quirk_nvidia_tegra_disable_rp_msi);
2711DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf,
2712 PCI_CLASS_BRIDGE_PCI, 8,
2713 pci_quirk_nvidia_tegra_disable_rp_msi);
2714DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
2715 PCI_CLASS_BRIDGE_PCI, 8,
2716 pci_quirk_nvidia_tegra_disable_rp_msi);
2717DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
2718 PCI_CLASS_BRIDGE_PCI, 8,
2719 pci_quirk_nvidia_tegra_disable_rp_msi);
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
/*
 * nVidia MCP55 bridges: clear bits 2 and 15 of the IRQ routing
 * register at config offset 0x74 if either is set.  Applies only to
 * devices exposing a HyperTransport capability.
 */
static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
	u32 cfg;

	if (!pci_find_capability(dev, PCI_CAP_ID_HT))
		return;

	pci_read_config_dword(dev, 0x74, &cfg);

	if (cfg & ((1 << 2) | (1 << 15))) {
		pr_info("Rewriting IRQ routing register on MCP55\n");
		cfg &= ~((1 << 2) | (1 << 15));
		pci_write_config_dword(dev, 0x74, cfg);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
			nvbridge_check_legacy_irq_routing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
			nvbridge_check_legacy_irq_routing);
2752
/*
 * Classify @dev's HT MSI mapping state:
 *   0 - no HT MSI mapping capability present
 *   1 - capability present but not enabled
 *   2 - at least one capability present and enabled
 */
static int ht_check_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;
	int found = 0;

	/* Check if there is an HT MSI cap and whether it is enabled. */
	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (found < 1)
			found = 1;	/* at least one mapping cap exists */
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			if (flags & HT_MSI_FLAGS_ENABLE) {
				if (found < 2) {
					found = 2;	/* enabled: done */
					break;
				}
			}
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}

	return found;
}
2780
/*
 * Return 1 if a device with a higher device number than @host_bridge
 * on the same bus has an HT MSI mapping capability, stopping at the
 * first HT slave encountered (which starts another chain).
 */
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
	struct pci_dev *dev;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = host_bridge->devfn >> 3;
	for (i = dev_no + 1; i < 0x20; i++) {
		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
		if (!dev)
			continue;

		/* An HT slave marks the next chain: stop scanning. */
		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			pci_dev_put(dev);
			break;
		}

		if (ht_check_msi_mapping(dev)) {
			found = 1;
			pci_dev_put(dev);
			break;
		}
		pci_dev_put(dev);
	}

	return found;
}
2811
#define PCI_HT_CAP_SLAVE_CTRL0		4
#define PCI_HT_CAP_SLAVE_CTRL1		8

/*
 * Return 1 if @dev ends its HyperTransport chain: bit 6 ("End of
 * Chain") is set in the applicable slave link-control register.  Flag
 * bit 10 selects which of the two link-control registers to read.
 */
static int is_end_of_ht_chain(struct pci_dev *dev)
{
	int pos, ctrl_off;
	int end = 0;
	u16 flags, ctrl;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);

	if (!pos)
		goto out;

	pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);

	ctrl_off = ((flags >> 10) & 1) ?
			PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
	pci_read_config_word(dev, pos + ctrl_off, &ctrl);

	if (ctrl & (1 << 6))
		end = 1;

out:
	return end;
}
2838
/*
 * Enable the HT MSI mapping on @dev, but only after locating the
 * nearest HT slave (scanning from @dev's own device number downward on
 * the same bus) and confirming that (a) the topology doesn't forbid it
 * and (b) the slave doesn't already provide an enabled mapping.
 */
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
	struct pci_dev *host_bridge;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = dev->devfn >> 3;
	for (i = dev_no; i >= 0; i--) {
		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
		if (!host_bridge)
			continue;

		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			/* Found the slave; keep its reference. */
			found = 1;
			break;
		}
		pci_dev_put(host_bridge);
	}

	if (!found)
		return;

	/*
	 * Don't enable when @dev itself ends the chain and another leaf
	 * with an MSI mapping follows it on this bus.
	 */
	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
	    host_bridge_with_leaf(host_bridge))
		goto out;

	/* Already enabled on the slave: nothing to do. */
	if (msi_ht_cap_enabled(host_bridge))
		goto out;

	ht_enable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);	/* release the kept reference */
}
2877
/*
 * Clear the enable bit in every readable HT MSI mapping capability of
 * @dev (TTL-bounded walk, mirroring ht_enable_msi_mapping()).
 */
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Disabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags & ~HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
2897
/*
 * Core of the HT MSI mapping quirk: decide whether @dev's mapping
 * should be enabled or disabled based on whether the host bridge at
 * devfn 00.0 of bus 0 in @dev's domain is a HyperTransport slave.
 * @all: non-zero = enable unconditionally (ht_enable_msi_mapping);
 *       zero = enable only via the chain-aware nv_ht_enable_msi_mapping.
 */
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
	struct pci_dev *host_bridge;
	int pos;
	int found;

	if (!pci_msi_enabled())
		return;

	/* Check if there is an HT MSI cap, and whether it is enabled. */
	found = ht_check_msi_mapping(dev);

	/* No HT MSI capability at all: nothing to fix. */
	if (found == 0)
		return;

	/*
	 * Locate the host bridge at 00.0 on bus 0 of this domain; its
	 * HT-slave status decides enable vs. disable below.
	 */
	host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
						  PCI_DEVFN(0, 0));
	if (host_bridge == NULL) {
		pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
		return;
	}

	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
	if (pos != 0) {
		/* Host bridge is an HT slave. */
		if (found == 1) {
			/* Mapping present but disabled: enable it. */
			if (all)
				ht_enable_msi_mapping(dev);
			else
				nv_ht_enable_msi_mapping(dev);
		}
		goto out;
	}

	/* Host bridge is not HT; leave an already-disabled mapping alone. */
	if (found == 1)
		goto out;

	/* Mapping is enabled under a non-HT host bridge: disable it. */
	ht_disable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2948
/* Apply the HT MSI mapping quirk in unconditional ("all") mode. */
static void nv_msi_ht_cap_quirk_all(struct pci_dev *pdev)
{
	__nv_msi_ht_cap_quirk(pdev, 1);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2955
/* Apply the HT MSI mapping quirk in chain-aware ("leaf") mode. */
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *pdev)
{
	__nv_msi_ht_cap_quirk(pdev, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2962
2963static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
2964{
2965 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2966}
2967
2968static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
2969{
2970 struct pci_dev *p;
2971
2972
2973
2974
2975
2976
2977 p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
2978 NULL);
2979 if (!p)
2980 return;
2981
2982 if ((p->revision < 0x3B) && (p->revision >= 0x30))
2983 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2984 pci_dev_put(p);
2985}
2986
2987static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
2988{
2989
2990 if (dev->revision < 0x18) {
2991 pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
2992 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2993 }
2994}
2995DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2996 PCI_DEVICE_ID_TIGON3_5780,
2997 quirk_msi_intx_disable_bug);
2998DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2999 PCI_DEVICE_ID_TIGON3_5780S,
3000 quirk_msi_intx_disable_bug);
3001DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3002 PCI_DEVICE_ID_TIGON3_5714,
3003 quirk_msi_intx_disable_bug);
3004DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3005 PCI_DEVICE_ID_TIGON3_5714S,
3006 quirk_msi_intx_disable_bug);
3007DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3008 PCI_DEVICE_ID_TIGON3_5715,
3009 quirk_msi_intx_disable_bug);
3010DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
3011 PCI_DEVICE_ID_TIGON3_5715S,
3012 quirk_msi_intx_disable_bug);
3013
3014DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
3015 quirk_msi_intx_disable_ati_bug);
3016DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
3017 quirk_msi_intx_disable_ati_bug);
3018DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
3019 quirk_msi_intx_disable_ati_bug);
3020DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
3021 quirk_msi_intx_disable_ati_bug);
3022DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
3023 quirk_msi_intx_disable_ati_bug);
3024
3025DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
3026 quirk_msi_intx_disable_bug);
3027DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
3028 quirk_msi_intx_disable_bug);
3029DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
3030 quirk_msi_intx_disable_bug);
3031
3032DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
3033 quirk_msi_intx_disable_bug);
3034DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
3035 quirk_msi_intx_disable_bug);
3036DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
3037 quirk_msi_intx_disable_bug);
3038DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
3039 quirk_msi_intx_disable_bug);
3040DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
3041 quirk_msi_intx_disable_bug);
3042DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
3043 quirk_msi_intx_disable_bug);
3044DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
3045 quirk_msi_intx_disable_qca_bug);
3046DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
3047 quirk_msi_intx_disable_qca_bug);
3048DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
3049 quirk_msi_intx_disable_qca_bug);
3050DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
3051 quirk_msi_intx_disable_qca_bug);
3052DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
3053 quirk_msi_intx_disable_qca_bug);
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065static void quirk_al_msi_disable(struct pci_dev *dev)
3066{
3067 dev->no_msi = 1;
3068 pci_warn(dev, "Disabling MSI/MSI-X\n");
3069}
3070DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
3071 PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable);
3072#endif
3073
3074
3075
3076
3077
3078
3079
3080
3081static void quirk_hotplug_bridge(struct pci_dev *dev)
3082{
3083 dev->is_hotplug_bridge = 1;
3084}
3085DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112#ifdef CONFIG_MMC_RICOH_MMC
/*
 * Disable the proprietary Ricoh MMC controller on the RL5C476 CardBus
 * part so MMC cards are handled by the standard SDHCI function.  The
 * disable bit (0xB7 bit 1) appears to be write-protected: registers
 * 0x8E/0x8D look like an unlock pair (magic 0xAA selects write-enable,
 * 0xB7 selects the target) -- NOTE(review): unlock semantics inferred
 * from the save/magic/restore pattern; confirm against Ricoh docs.
 */
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
	u8 write_enable;
	u8 write_target;
	u8 disable;

	/* Only function 0 carries the control registers. */
	if (PCI_FUNC(dev->devfn))
		return;

	pci_read_config_byte(dev, 0xB7, &disable);
	if (disable & 0x02)
		return;		/* already disabled */

	/* Save lock bytes, unlock, set the disable bit, restore. */
	pci_read_config_byte(dev, 0x8E, &write_enable);
	pci_write_config_byte(dev, 0x8E, 0xAA);
	pci_read_config_byte(dev, 0x8D, &write_target);
	pci_write_config_byte(dev, 0x8D, 0xB7);
	pci_write_config_byte(dev, 0xB7, disable | 0x02);
	pci_write_config_byte(dev, 0x8E, write_enable);
	pci_write_config_byte(dev, 0x8D, write_target);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
3144
/*
 * Disable the proprietary Ricoh MMC controller on R5C832-family parts
 * (reached via the FireWire function) so MMC cards go to the standard
 * SDHCI controller.  For R5CE822/R5CE823 the SD controller base
 * frequency is also raised to 50 MHz first (per the notice below).
 */
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
	u8 write_enable;
	u8 disable;

	/* Only function 0 carries the control registers. */
	if (PCI_FUNC(dev->devfn))
		return;

	/*
	 * Magic register sequence for the frequency change on the
	 * R5CE822/R5CE823 variants -- NOTE(review): register meanings
	 * are opaque; the sequence is applied verbatim.
	 */
	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
		pci_write_config_byte(dev, 0xf9, 0xfc);
		pci_write_config_byte(dev, 0x150, 0x10);
		pci_write_config_byte(dev, 0xf9, 0x00);
		pci_write_config_byte(dev, 0xfc, 0x01);
		pci_write_config_byte(dev, 0xe1, 0x32);
		pci_write_config_byte(dev, 0xfc, 0x00);

		pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n");
	}

	pci_read_config_byte(dev, 0xCB, &disable);

	if (disable & 0x02)
		return;		/* already disabled */

	/* Unlock (0xCA = 0x57), set the disable bit at 0xCB, relock. */
	pci_read_config_byte(dev, 0xCA, &write_enable);
	pci_write_config_byte(dev, 0xCA, 0x57);
	pci_write_config_byte(dev, 0xCB, disable | 0x02);
	pci_write_config_byte(dev, 0xCA, write_enable);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
3201#endif
3202
3203#ifdef CONFIG_DMAR_TABLE
3204#define VTUNCERRMSK_REG 0x1ac
3205#define VTD_MSK_SPEC_ERRORS (1 << 31)
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216static void vtd_mask_spec_errors(struct pci_dev *dev)
3217{
3218 u32 word;
3219
3220 pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
3221 pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
3222}
3223DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
3224DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
3225#endif
3226
3227static void fixup_ti816x_class(struct pci_dev *dev)
3228{
3229 u32 class = dev->class;
3230
3231
3232 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
3233 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
3234 class, dev->class);
3235}
3236DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
3237 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
3238
3239
3240
3241
3242
3243static void fixup_mpss_256(struct pci_dev *dev)
3244{
3245 dev->pcie_mpss = 1;
3246}
3247DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3248 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
3249DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3250 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
3251DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3252 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
3253
3254
3255
3256
3257
3258
3259
3260
3261
/*
 * Work around a hardware erratum relating to 256B MPS (per the log
 * message below) on Intel memory-controller devices: when the PCIe bus
 * is being tuned (i.e. pcie_bus_config is neither TUNE_OFF nor
 * DEFAULT), clear the read-completion-coalescing enable bit (bit 10)
 * at config offset 0x48.
 */
static void quirk_intel_mc_errata(struct pci_dev *dev)
{
	int err;
	u16 rcc;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return;

	/* Read the read-completion-coalescing control register. */
	err = pci_read_config_word(dev, 0x48, &rcc);
	if (err) {
		pci_err(dev, "Error attempting to read the read completion coalescing register\n");
		return;
	}

	if (!(rcc & (1 << 10)))
		return;		/* coalescing already disabled */

	rcc &= ~(1 << 10);

	err = pci_write_config_word(dev, 0x48, rcc);
	if (err) {
		pci_err(dev, "Error attempting to write the read completion coalescing register\n");
		return;
	}

	pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
}
/* Intel 5000 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
/* Intel 5100 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3322
3323
3324
3325
3326
3327
/*
 * Intel NTB (non-transparent bridge) devices: fix up the sizes of BAR2
 * and BAR4.  Config bytes 0xD0/0xD1 appear to hold the log2 of each
 * BAR's true size (NOTE(review): register semantics inferred from the
 * shift arithmetic below — confirm against the NTB datasheet).
 */
static void quirk_intel_ntb(struct pci_dev *dev)
{
	int rc;
	u8 val;

	rc = pci_read_config_byte(dev, 0x00D0, &val);
	if (rc)
		return;

	/* Resize BAR2 to 2^val bytes. */
	dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;

	rc = pci_read_config_byte(dev, 0x00D1, &val);
	if (rc)
		return;

	/* Resize BAR4 to 2^val bytes. */
	dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
/* Display-engine interrupt enable register in the i915 MMIO BAR. */
#define I915_DEIER_REG 0x4400c

/*
 * Some BIOSes leave Intel GPU display-engine interrupts enabled even
 * though no driver has claimed the device; clear DEIER so those stray
 * interrupts cannot fire (e.g. as screaming shared interrupts).
 */
static void disable_igfx_irq(struct pci_dev *dev)
{
	void __iomem *regs = pci_iomap(dev, 0, 0);	/* map whole BAR 0 */
	if (regs == NULL) {
		pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
		return;
	}

	/* Only touch the register when interrupts are actually enabled. */
	if (readl(regs + I915_DEIER_REG) != 0) {
		pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");

		writel(0, regs + I915_DEIER_REG);
	}

	pci_iounmap(dev, regs);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3385
3386
3387
3388
3389
/*
 * Drop the default D3hot->D0 transition delay for devices known not to
 * need it (the PCI core otherwise sleeps between power-state changes).
 */
static void quirk_remove_d3_delay(struct pci_dev *dev)
{
	dev->d3_delay = 0;
}
/* NOTE(review): the three groups below appear to be distinct Intel
 * platform generations (0x04xx/0x0cxx, 0x8cxx, 0x22xx) — confirm. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
3420
3421
3422
3423
3424
3425
/*
 * Mark devices whose INTx masking (PCI_COMMAND_INTX_DISABLE or the
 * status bit) does not work; the core will then avoid relying on it
 * (e.g. for VFIO interrupt handling).
 */
static void quirk_broken_intx_masking(struct pci_dev *dev)
{
	dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
			quirk_broken_intx_masking);
/* NOTE(review): 0x1814/0x0601 and 0x1b7c/0x0004 lack vendor-ID macros in
 * pci_ids.h; identities not verifiable from this file. */
DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004,
			quirk_broken_intx_masking);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
			quirk_broken_intx_masking);

/* Intel Ethernet controller family, 0x1572..0x158b and 0x37dx. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
3466
/* Mellanox device IDs with unconditionally broken INTx masking. */
static u16 mellanox_broken_intx_devs[] = {
	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
};

/* ConnectX-4 firmware minor-version window used below: minors above
 * CURR_MAX are treated as unknown/broken, minors below INTX_SUPPORT
 * predate working INTx masking. */
#define CONNECTX_4_CURR_MAX_MINOR 99
#define CONNECTX_4_INTX_SUPPORT_MINOR 14
3486
3487
3488
3489
3490
3491
3492
/*
 * Decide whether a Mellanox device has broken INTx masking.  Older
 * devices (table above) are always broken; ConnectX-4/4LX depend on the
 * firmware version, which is read from the start of BAR 0 (big-endian:
 * first dword = major|minor, second dword = subminor in the low half).
 * ConnectIB is explicitly exempted.
 */
static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
{
	__be32 __iomem *fw_ver;
	u16 fw_major;
	u16 fw_minor;
	u16 fw_subminor;
	u32 fw_maj_min;
	u32 fw_sub_min;
	int i;

	/* Devices known to be broken regardless of firmware. */
	for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
		if (pdev->device == mellanox_broken_intx_devs[i]) {
			pdev->broken_intx_masking = 1;
			return;
		}
	}

	/* ConnectIB: INTx masking works, leave it alone. */
	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
		return;

	/* Firmware check only applies to ConnectX-4 and ConnectX-4 LX. */
	if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
	    pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
		return;

	/* Memory space must be enabled before reading the FW version. */
	if (pci_enable_device_mem(pdev)) {
		pci_warn(pdev, "Can't enable device memory\n");
		return;
	}

	/* Map only the 4 bytes we need from the initialization segment. */
	fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
	if (!fw_ver) {
		pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
		goto out;
	}

	/* Unpack major/minor/subminor from the two big-endian dwords. */
	fw_maj_min = ioread32be(fw_ver);
	fw_sub_min = ioread32be(fw_ver + 1);
	fw_major = fw_maj_min & 0xffff;
	fw_minor = fw_maj_min >> 16;
	fw_subminor = fw_sub_min & 0xffff;
	if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
	    fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
		pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
			 fw_major, fw_minor, fw_subminor, pdev->device ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
		pdev->broken_intx_masking = 1;
	}

	iounmap(fw_ver);

out:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
			mellanox_check_broken_intx_masking);
3554
/*
 * Flag devices that must not be reset via a secondary-bus reset; the
 * PCI core will then skip bus resets involving them.
 */
static void quirk_no_bus_reset(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
}

/* Atheros wireless parts that misbehave after a bus reset
 * (NOTE(review): exact failure mode not documented here — see the
 * original commit messages for these IDs). */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);

/* Cavium device that does not tolerate bus reset either. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
3579
/*
 * Flag devices for which a D3hot->D0 power-management transition does
 * not perform a usable reset, so the core won't count PM as a reset
 * method for them.
 */
static void quirk_no_pm_reset(struct pci_dev *dev)
{
	/*
	 * Only applied off the root bus; root-bus devices are left alone
	 * (NOTE(review): rationale for the root-bus exemption is not
	 * visible in this chunk — confirm against the original commit).
	 */
	if (!pci_is_root_bus(dev->bus))
		dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
}

/* All AMD/ATI VGA-class devices. */
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3600
3601
3602
3603
3604
3605
/*
 * Disable MSI on hotplug bridges of early Intel Thunderbolt controllers;
 * Cactus Ridge 4C revisions above 1 are exempt.
 */
static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
{
	if (pdev->is_hotplug_bridge &&
	    (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
	     pdev->revision <= 1))
		pdev->no_msi = 1;	/* fall back to INTx for hotplug events */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
			quirk_thunderbolt_hotplug_msi);
3623
3624#ifdef CONFIG_ACPI
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
/*
 * On Apple machines, cut power to the Thunderbolt controller during
 * suspend by driving the SXIO/SXFP/SXLV ACPI methods found under the
 * controller's upstream bridge.  Runs as a SUSPEND_LATE fixup on the
 * Cactus Ridge 4C upstream port.
 *
 * NOTE(review): the exact meaning of each method and the 300 ms delay
 * come from Apple firmware behavior and are not documented here; the
 * call sequence below is order-sensitive and must not be reordered.
 */
static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
	acpi_handle bridge, SXIO, SXFP, SXLV;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
		return;
	bridge = ACPI_HANDLE(&dev->dev);
	if (!bridge)
		return;

	/* All three power-control methods must exist or we do nothing. */
	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
		return;
	pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");

	/* Power-down sequence; keep the ordering and the settle delay. */
	acpi_execute_simple_method(SXIO, NULL, 1);
	acpi_execute_simple_method(SXFP, NULL, 0);
	msleep(300);
	acpi_execute_simple_method(SXLV, NULL, 0);
	acpi_execute_simple_method(SXIO, NULL, 0);
	acpi_execute_simple_method(SXLV, NULL, 0);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_poweroff_thunderbolt);
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
/*
 * On Apple machines, delay resume of Thunderbolt downstream ports until
 * the NHI (native host interface) device has resumed and re-established
 * the PCI tunnels; otherwise devices behind the port would appear gone.
 * The NHI is located as device 0 behind the sibling bridge at slot 0.
 */
static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
{
	struct pci_dev *sibling = NULL;
	struct pci_dev *nhi = NULL;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
		return;

	/* Find the sibling bridge (slot 0) that leads to the NHI. */
	sibling = pci_get_slot(dev->bus, 0x0);
	if (sibling == dev)
		goto out;			/* we are the NHI bridge itself */
	if (!sibling || !sibling->subordinate)
		goto out;
	nhi = pci_get_slot(sibling->subordinate, 0x0);
	if (!nhi)
		goto out;
	/* Only the known Intel NHI devices of class SYSTEM_OTHER qualify. */
	if (nhi->vendor != PCI_VENDOR_ID_INTEL
	    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
		nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
	    || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
		goto out;
	pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
	/* Order our resume after the NHI's resume. */
	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
	pci_dev_put(nhi);
	pci_dev_put(sibling);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
3733#endif
3734
3735
3736
3737
3738
3739
/*
 * Device-specific reset for Intel 82599 SFP virtual functions.
 * These VFs support FLR, so the reset itself is a plain pcie_flr();
 * probe mode merely reports that the method is available.
 */
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
	if (probe)
		return 0;	/* method available; nothing to do on probe */

	pcie_flr(dev);

	return 0;
}
3754
/* MMIO register offsets in the Ivy Bridge IGD BAR used by reset_ivb_igd(). */
#define SOUTH_CHICKEN2		0xc2004
#define PCH_PP_STATUS		0xc7200
#define PCH_PP_CONTROL		0xc7204
#define MSG_CTL			0x45010
#define NSDE_PWR_STATE		0xd0100
#define IGD_OPERATION_TIMEOUT	10000     /* set timeout 10 seconds */

/*
 * Device-specific reset for Ivy Bridge integrated graphics: force the
 * panel-power sequencer off via MMIO and poll PCH_PP_STATUS until the
 * power-cycle completes (or the 10 s timeout expires).
 */
static int reset_ivb_igd(struct pci_dev *dev, int probe)
{
	void __iomem *mmio_base;
	unsigned long timeout;
	u32 val;

	if (probe)
		return 0;	/* reset method is always available */

	mmio_base = pci_iomap(dev, 0, 0);
	if (!mmio_base)
		return -ENOMEM;

	iowrite32(0x00000002, mmio_base + MSG_CTL);

	/*
	 * Enable the panel-power override, then clear the power-enable bit
	 * in PCH_PP_CONTROL to force the panel off.
	 * (NOTE(review): bit meanings inferred from register names and the
	 * masks used — confirm against the IVB PRM.)
	 */
	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);

	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
	iowrite32(val, mmio_base + PCH_PP_CONTROL);

	/* Wait for the power-down to complete (status bits clear). */
	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
	do {
		val = ioread32(mmio_base + PCH_PP_STATUS);
		if ((val & 0xb0000000) == 0)
			goto reset_complete;
		msleep(10);
	} while (time_before(jiffies, timeout));
	pci_warn(dev, "timeout during reset\n");

reset_complete:
	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);

	pci_iounmap(dev, mmio_base);
	return 0;
}
3803
3804
/* Device-specific reset method for Chelsio T4-based adapters. */
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
	u16 old_command;
	u16 msix_flags;

	/*
	 * Only the physical-function 0x4000-range device IDs use this
	 * method; other Chelsio functions fall through to other resets.
	 */
	if ((dev->device & 0xf000) != 0x4000)
		return -ENOTTY;

	/* Method is available for all matching devices. */
	if (probe)
		return 0;

	/*
	 * Enable bus mastering first: the FLR path below relies on the
	 * device being able to issue/accept transactions.
	 * (NOTE(review): the original upstream comment explains firmware
	 * needs to see master enabled — confirm.)
	 */
	pci_read_config_word(dev, PCI_COMMAND, &old_command);
	pci_write_config_word(dev, PCI_COMMAND,
			      old_command | PCI_COMMAND_MASTER);

	/* Snapshot config space so it can be restored after the FLR. */
	pci_save_state(dev);

	/*
	 * If MSI-X is currently disabled, enable it with all vectors
	 * masked before the FLR so the device cannot raise interrupts
	 * while it is being reset.
	 */
	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
				      msix_flags |
				      PCI_MSIX_FLAGS_ENABLE |
				      PCI_MSIX_FLAGS_MASKALL);

	pcie_flr(dev);

	/*
	 * Restore the saved state (which also undoes the temporary MSI-X
	 * change) and put the command register back as we found it.
	 */
	pci_restore_state(dev);
	pci_write_config_word(dev, PCI_COMMAND, old_command);
	return 0;
}
3865
/* Device IDs referenced by the pci_dev_reset_methods[] table below. */
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
/*
 * Device-specific reset for NVMe controllers that need an orderly
 * controller disable (CC.EN cleared, CSTS.RDY polled) before FLR.
 * Applies only to class PCI_CLASS_STORAGE_EXPRESS devices that support
 * FLR and have a usable BAR 0.
 */
static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
{
	void __iomem *bar;
	u16 cmd;
	u32 cfg;

	if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
	    !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Map BAR 0 far enough to reach the CC register. */
	bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
	if (!bar)
		return -ENOTTY;

	/* Ensure memory decoding is on so the MMIO accesses work. */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);

	cfg = readl(bar + NVME_REG_CC);

	/* Controller enabled: disable it and wait for RDY to clear. */
	if (cfg & NVME_CC_ENABLE) {
		u32 cap = readl(bar + NVME_REG_CAP);
		unsigned long timeout;

		/*
		 * Clear EN (and any shutdown-notification bits) to start
		 * the controlled disable.
		 */
		cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);

		writel(cfg, bar + NVME_REG_CC);

		/*
		 * CAP.TO is in 500 ms units; wait at most (TO + 1) * 500 ms
		 * for CSTS.RDY to drop, polling every 100 ms.
		 */
		timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

		for (;;) {
			u32 status = readl(bar + NVME_REG_CSTS);

			/* RDY cleared: disable complete. */
			if (!(status & NVME_CSTS_RDY))
				break;

			msleep(100);

			if (time_after(jiffies, timeout)) {
				pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
				break;
			}
		}
	}

	pci_iounmap(dev, bar);

	pcie_flr(dev);

	return 0;
}
3950
3951
3952
3953
3954
3955
3956
/*
 * Device-specific reset: plain FLR followed by a fixed 250 ms delay for
 * devices that need extra time to become usable again after the reset.
 */
static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
{
	if (!pcie_has_flr(dev))
		return -ENOTTY;	/* method requires FLR support */

	if (probe)
		return 0;

	pcie_flr(dev);

	msleep(250);	/* extra settle time beyond the spec'd FLR delay */

	return 0;
}
3971
/*
 * Table of device-specific reset methods, matched by vendor/device in
 * pci_dev_specific_reset(); terminated by the zero entry.
 */
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
		 reset_intel_82599_sfp_virtfn },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
		reset_ivb_igd },
	/* Samsung NVMe needing disable-before-FLR; Intel 0x0953 needing a
	 * post-FLR delay; Chelsio entry matches any device ID but filters
	 * internally on the 0x4000 range. */
	{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
		reset_chelsio_generic_dev },
	{ 0 }
};
3985
3986
3987
3988
3989
3990
3991int pci_dev_specific_reset(struct pci_dev *dev, int probe)
3992{
3993 const struct pci_dev_reset_methods *i;
3994
3995 for (i = pci_dev_reset_methods; i->reset; i++) {
3996 if ((i->vendor == dev->vendor ||
3997 i->vendor == (u16)PCI_ANY_ID) &&
3998 (i->device == dev->device ||
3999 i->device == (u16)PCI_ANY_ID))
4000 return i->reset(dev, probe);
4001 }
4002
4003 return -ENOTTY;
4004}
4005
4006static void quirk_dma_func0_alias(struct pci_dev *dev)
4007{
4008 if (PCI_FUNC(dev->devfn) != 0)
4009 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
4010}
4011
4012
4013
4014
4015
4016
4017DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
4018DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
4019
/*
 * Some devices issue DMA as function 1 of their slot; register a DMA
 * alias to function 1 so IOMMU mappings cover those transactions.
 */
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 1)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}

/*
 * Marvell AHCI SATA controllers whose second (virtual) function issues
 * the DMA (NOTE(review): individual bug-report references for each ID
 * were in the original comments — not reproducible from this chunk).
 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
			 quirk_dma_func1_alias);
/* HighPoint RocketRAID parts with the same behavior. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
			 quirk_dma_func1_alias);

/* 0x1c28/0x0122 has no pci_ids.h macro; identity not verifiable here. */
DECLARE_PCI_FIXUP_HEADER(0x1c28,
			 0x0122,
			 quirk_dma_func1_alias);
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
/*
 * Devices whose DMA alias is a fixed devfn rather than derived from the
 * device's own devfn; driver_data carries the alias devfn.  Matched by
 * subsystem ID to distinguish Adaptec 0x0285 variants.
 */
static const struct pci_device_id fixed_dma_alias_tbl[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bb),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bc),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ 0 }
};
4101
4102static void quirk_fixed_dma_alias(struct pci_dev *dev)
4103{
4104 const struct pci_device_id *id;
4105
4106 id = pci_match_id(fixed_dma_alias_tbl, dev);
4107 if (id)
4108 pci_add_dma_alias(dev, id->driver_data, 1);
4109}
4110DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
/*
 * Some PCIe-to-PCI bridges forward DMA using the bridge's own requester
 * ID instead of the secondary-bus alias.  Flag conventional-PCI bridges
 * that sit directly below a PCIe device (but not below a PCIe-to-PCI
 * bridge) so the IOMMU code uses the bridge alias.
 */
static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
{
	if (!pci_is_root_bus(pdev->bus) &&
	    pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
	    pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
		pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
			 quirk_use_pcie_bridge_dma_alias);
/* The remaining IDs have no pci_ids.h macros; identities per the
 * original commit messages (not visible in this chunk). */
DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
4140
4141
4142
4143
4144
4145
4146
/*
 * Intel MIC x200 NTB devices forward DMA under several fixed requester
 * IDs; register all three known alias devfns.
 */
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
/*
 * Intel VCA (Visual Compute Accelerator) NTB devices can forward DMA
 * from any device behind the bridge; alias every possible slot (0-0x1f),
 * 5 devfns per slot, so all requester IDs are covered.
 */
static void quirk_pex_vca_alias(struct pci_dev *pdev)
{
	const unsigned int num_pci_slots = 0x20;
	unsigned int slot;

	for (slot = 0; slot < num_pci_slots; slot++)
		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
4184
4185
4186
4187
4188
4189
/*
 * Cavium ThunderX2 root ports translate requester IDs; flag them so the
 * DMA-alias walk treats the bridge as the translation root.
 */
static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
			 quirk_bridge_cavm_thrx2_pcie_root);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
			 quirk_bridge_cavm_thrx2_pcie_root);
4198
4199
4200
4201
4202
/*
 * Intersil/Techwell TW686x video-capture cards report class
 * PCI_CLASS_NOT_DEFINED; override with MULTIMEDIA_OTHER (prog-if 0x01)
 * so a driver can bind.
 */
static void quirk_tw686x_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Use "Video Controller" class and log the override. */
	pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
	pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
4220
4221
4222
4223
4224
4225
/*
 * Flag devices (typically root ports) whose completions with the
 * Relaxed Ordering attribute are unreliable; the core then avoids
 * enabling RO on devices below them.
 */
static void quirk_relaxedordering_disable(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
	pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
}

/*
 * Intel root-port IDs affected by the RO completion erratum
 * (0x6f0x and 0x2f0x families — per the original commit these are
 * Xeon E5/E7 v3/v4 parts; confirm against Intel spec updates).
 */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);

/* AMD root ports (0x1a0x) with the same Relaxed Ordering problem. */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
/*
 * Clear the Relaxed Ordering and No Snoop enables in the DEVCTL register
 * of @pdev's root port, so TLPs from devices below it never carry those
 * attributes (works around a completion erratum; see callers).
 */
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
	struct pci_dev *root_port = pcie_find_root_port(pdev);

	/* No root port found (e.g. device not under a PCIe hierarchy). */
	if (!root_port) {
		pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
		return;
	}

	pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
		 dev_name(&pdev->dev));
	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_RELAX_EN |
					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
}
4348
4349
4350
4351
4352
/*
 * Chelsio T5 devices (device IDs 0x54xx) trigger the PCIe completion
 * erratum above; disable RO/No Snoop on their root port.  The fixup is
 * registered for all Chelsio IDs and filters on the 0x5400 range here.
 */
static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
{
	/* Only T5 parts (upper device-ID byte 0x54) are affected. */
	if ((pdev->device & 0xff00) == 0x5400)
		quirk_disable_root_port_attributes(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
			 quirk_chelsio_T5_disable_root_port_attributes);
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
4379{
4380 if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
4381 return 1;
4382 return 0;
4383}
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
/*
 * ACS quirk for AMD southbridge multifunction devices on the root bus:
 * when the platform has an AMD IOMMU (detected via the ACPI IVRS table),
 * treat Request Redirect and Completion Redirect as effectively enabled
 * even though the device lacks an ACS capability.
 */
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
#ifdef CONFIG_ACPI
	struct acpi_table_header *header = NULL;
	acpi_status status;

	/* Quirk only targets multifunction devices on the root bus. */
	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
		return -ENODEV;

	/* Presence of IVRS implies an AMD IOMMU governs this device. */
	status = acpi_get_table("IVRS", 0, &header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_put_table(header);

	/* Filter to the ACS controls that matter for isolation here. */
	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);

	return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
	return -ENODEV;
#endif
}
4435
4436static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
4437{
4438 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4439 return false;
4440
4441 switch (dev->device) {
4442
4443
4444
4445
4446 case 0xa000 ... 0xa7ff:
4447 case 0xaf84:
4448 case 0xb884:
4449 return true;
4450 default:
4451 return false;
4452 }
4453}
4454
4455static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
4456{
4457 if (!pci_quirk_cavium_acs_match(dev))
4458 return -ENOTTY;
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468 return pci_acs_ctrl_enabled(acs_flags,
4469 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4470}
4471
/*
 * X-Gene Root Ports: report SV/RR/CR/UF as effectively enabled even
 * though no ACS capability is exposed.
 */
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4482
4483
4484
4485
4486
4487
4488static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
4489{
4490 if (!pci_is_pcie(dev) ||
4491 ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
4492 (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
4493 return -ENOTTY;
4494
4495 switch (dev->device) {
4496 case 0x0710 ... 0x071e:
4497 case 0x0721:
4498 case 0x0723 ... 0x0732:
4499 return pci_acs_ctrl_enabled(acs_flags,
4500 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4501 }
4502
4503 return false;
4504}
4505
4506
4507
4508
4509
4510
4511
/*
 * Intel PCH Root Port device IDs subject to the PCH ACS quirk, grouped
 * by blank lines per PCH generation (generation labels not recorded
 * here — verify ranges against the Intel PCH datasheets if extending).
 */
static const u16 pci_quirk_intel_pch_acs_ids[] = {
	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,

	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,

	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,

	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,

	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,

	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,

	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,

	0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
	0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,

	0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
4539
4540static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4541{
4542 int i;
4543
4544
4545 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4546 return false;
4547
4548 for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
4549 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4550 return true;
4551
4552 return false;
4553}
4554
4555static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
4556{
4557 if (!pci_quirk_intel_pch_acs_match(dev))
4558 return -ENOTTY;
4559
4560 if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
4561 return pci_acs_ctrl_enabled(acs_flags,
4562 PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4563
4564 return pci_acs_ctrl_enabled(acs_flags, 0);
4565}
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
/*
 * Root Ports matched via pci_dev_acs_enabled[] (Qualcomm/HXT entries)
 * provide SV/RR/CR/UF-equivalent isolation without an ACS capability.
 */
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4582
4583static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4584{
4585 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4586 return -ENOTTY;
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4597
4598 return acs_flags ? 0 : 1;
4599}
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4647{
4648 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4649 return false;
4650
4651 switch (dev->device) {
4652 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a:
4653 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee:
4654 case 0x9d10 ... 0x9d1b:
4655 return true;
4656 }
4657
4658 return false;
4659}
4660
/*
 * On these PCHs the ACS control register follows the capability
 * register a dword later instead of at the standard PCI_ACS_CTRL
 * offset (see the read/write sites below).
 */
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
4662
/*
 * Report ACS state for Sunrise-Point-style Root Ports, reading the
 * control register from its non-standard INTEL_SPT_ACS_CTRL offset.
 */
static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = dev->acs_cap;
	if (!pos)
		return -ENOTTY;

	/* Only consider controls the capability advertises (plus EC) */
	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
4683
/*
 * Multifunction endpoints listed in pci_dev_acs_enabled[]: treat all
 * ACS controls except Egress Control as effectively enabled.
 */
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
		PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
4699
/*
 * Root Complex Integrated Endpoints: report SV/RR/CR/UF as enabled.
 * Only applies to devices of type PCI_EXP_TYPE_RC_END.
 */
static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
		return -ENOTTY;

	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4713
/*
 * Broadcom device matched in pci_dev_acs_enabled[]: SV/RR/CR/UF are
 * treated as effectively enabled despite no ACS capability.
 */
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4725
/*
 * Devices with ACS-equivalent isolation.  Entries are tried in order
 * by pci_dev_specific_acs_enabled(); the first matching callback that
 * returns >= 0 decides, so specific-device entries must come before
 * PCI_ANY_ID wildcards for the same vendor.
 */
static const struct pci_dev_acs_enabled {
	u16 vendor;	/* vendor ID or (u16)PCI_ANY_ID */
	u16 device;	/* device ID or (u16)PCI_ANY_ID */
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
	/* Intel NIC multifunction devices (groups per hardware family) */
	{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
	/* Intel wildcards: the quirks themselves filter further by device */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },

	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },

	{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },

	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs },
	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },

	{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },

	{ PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },

	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },

	{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
	/* Zhaoxin Root/Downstream Ports (quirk filters by device ID) */
	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
	{ 0 }	/* terminator */
};
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
4844{
4845 const struct pci_dev_acs_enabled *i;
4846 int ret;
4847
4848
4849
4850
4851
4852
4853
4854 for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
4855 if ((i->vendor == dev->vendor ||
4856 i->vendor == (u16)PCI_ANY_ID) &&
4857 (i->device == dev->device ||
4858 i->device == (u16)PCI_ANY_ID)) {
4859 ret = i->acs_enabled(dev, acs_flags);
4860 if (ret >= 0)
4861 return ret;
4862 }
4863 }
4864
4865 return -ENOTTY;
4866}
4867
4868
/* LPC bridge register holding the Root Complex Base Address */
#define INTEL_LPC_RCBA_REG 0xf0
/* Address bits of the RCBA register (low bits are flags) */
#define INTEL_LPC_RCBA_MASK 0xffffc000
/* RCBA enable bit */
#define INTEL_LPC_RCBA_ENABLE (1 << 0)

/* Backbone Scratch Pad register, offset within the RCBA window */
#define INTEL_BSPR_REG 0x1104
/* Backbone Peer Non-Posted Disable */
#define INTEL_BSPR_REG_BPNPD (1 << 8)
/* Backbone Peer Posted Disable */
#define INTEL_BSPR_REG_BPPD (1 << 9)

/* Upstream Peer Decode Configuration register within the RCBA window */
#define INTEL_UPDCR_REG 0x1014
/* Peer-decode enable bits (cleared to disable peer decodes) */
#define INTEL_UPDCR_REG_MASK 0x3f
4886
/*
 * Disable peer decodes in the PCH's RCBA-mapped backbone registers so
 * that peer-to-peer between Root Ports is prevented (the LPC-side half
 * of the Intel PCH ACS workaround).
 *
 * Returns 0 on success, negative errno if RCBA is disabled or cannot
 * be mapped.
 */
static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
{
	u32 rcba, bspr, updcr;
	void __iomem *rcba_mem;

	/* Locate the RCBA window via the LPC bridge at devfn 1f.0 */
	pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
				  INTEL_LPC_RCBA_REG, &rcba);
	if (!(rcba & INTEL_LPC_RCBA_ENABLE))
		return -EINVAL;

	/* Map enough of the window to reach the UPDCR register */
	rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
			   PAGE_ALIGN(INTEL_UPDCR_REG));
	if (!rcba_mem)
		return -ENOMEM;

	/*
	 * If both backbone peer-disable bits (BSPR) are already set,
	 * peer transactions are blocked and UPDCR needs no change;
	 * otherwise clear the UPDCR peer-decode enables.
	 */
	bspr = readl(rcba_mem + INTEL_BSPR_REG);
	bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
	if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
		updcr = readl(rcba_mem + INTEL_UPDCR_REG);
		if (updcr & INTEL_UPDCR_REG_MASK) {
			pci_info(dev, "Disabling UPDCR peer decodes\n");
			updcr &= ~INTEL_UPDCR_REG_MASK;
			writel(updcr, rcba_mem + INTEL_UPDCR_REG);
		}
	}

	iounmap(rcba_mem);
	return 0;
}
4928
4929
/* Root Port Miscellaneous Port Configuration register */
#define INTEL_MPC_REG 0xd8
/* MPC bit 26 — IRBNCE; enables ACS-P2P-redirect-equivalent behavior */
#define INTEL_MPC_REG_IRBNCE (1 << 26)
4933
4934static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
4935{
4936 u32 mpc;
4937
4938
4939
4940
4941
4942
4943
4944 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
4945 if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
4946 pci_info(dev, "Enabling MPC IRBNCE\n");
4947 mpc |= INTEL_MPC_REG_IRBNCE;
4948 pci_write_config_word(dev, INTEL_MPC_REG, mpc);
4949 }
4950}
4951
/*
 * Enable the full Intel PCH ACS workaround: disable LPC-side peer
 * decodes, set the Root Port's IRBNCE bit, and mark the device so
 * pci_quirk_intel_pch_acs() reports isolation as enabled.
 */
static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
{
	if (!pci_quirk_intel_pch_acs_match(dev))
		return -ENOTTY;

	/* If the LPC half fails, report success but leave the flag unset */
	if (pci_quirk_enable_intel_lpc_acs(dev)) {
		pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
		return 0;
	}

	pci_quirk_enable_intel_rp_mpc_acs(dev);

	/* Consumed by pci_quirk_intel_pch_acs() */
	dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;

	pci_info(dev, "Intel PCH root port ACS workaround enabled\n");

	return 0;
}
4970
4971static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
4972{
4973 int pos;
4974 u32 cap, ctrl;
4975
4976 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4977 return -ENOTTY;
4978
4979 pos = dev->acs_cap;
4980 if (!pos)
4981 return -ENOTTY;
4982
4983 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4984 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4985
4986 ctrl |= (cap & PCI_ACS_SV);
4987 ctrl |= (cap & PCI_ACS_RR);
4988 ctrl |= (cap & PCI_ACS_CR);
4989 ctrl |= (cap & PCI_ACS_UF);
4990
4991 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4992
4993 pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
4994
4995 return 0;
4996}
4997
4998static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
4999{
5000 int pos;
5001 u32 cap, ctrl;
5002
5003 if (!pci_quirk_intel_spt_pch_acs_match(dev))
5004 return -ENOTTY;
5005
5006 pos = dev->acs_cap;
5007 if (!pos)
5008 return -ENOTTY;
5009
5010 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
5011 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
5012
5013 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
5014
5015 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
5016
5017 pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
5018
5019 return 0;
5020}
5021
/*
 * Device-specific ACS enable / disable-redirect operations.  Entries
 * are tried in order; a callback returning >= 0 ends the search.
 */
static const struct pci_dev_acs_ops {
	u16 vendor;	/* vendor ID or (u16)PCI_ANY_ID */
	u16 device;	/* device ID or (u16)PCI_ANY_ID */
	int (*enable_acs)(struct pci_dev *dev);
	int (*disable_acs_redir)(struct pci_dev *dev);
} pci_dev_acs_ops[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_pch_acs,
	},
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
	    .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
	},
};
5036
5037int pci_dev_specific_enable_acs(struct pci_dev *dev)
5038{
5039 const struct pci_dev_acs_ops *p;
5040 int i, ret;
5041
5042 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
5043 p = &pci_dev_acs_ops[i];
5044 if ((p->vendor == dev->vendor ||
5045 p->vendor == (u16)PCI_ANY_ID) &&
5046 (p->device == dev->device ||
5047 p->device == (u16)PCI_ANY_ID) &&
5048 p->enable_acs) {
5049 ret = p->enable_acs(dev);
5050 if (ret >= 0)
5051 return ret;
5052 }
5053 }
5054
5055 return -ENOTTY;
5056}
5057
5058int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
5059{
5060 const struct pci_dev_acs_ops *p;
5061 int i, ret;
5062
5063 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
5064 p = &pci_dev_acs_ops[i];
5065 if ((p->vendor == dev->vendor ||
5066 p->vendor == (u16)PCI_ANY_ID) &&
5067 (p->device == dev->device ||
5068 p->device == (u16)PCI_ANY_ID) &&
5069 p->disable_acs_redir) {
5070 ret = p->disable_acs_redir(dev);
5071 if (ret >= 0)
5072 return ret;
5073 }
5074 }
5075
5076 return -ENOTTY;
5077}
5078
5079
5080
5081
5082
5083
5084
5085
/*
 * Intel QAT Virtual Functions (device 0x443) carry a PCIe capability
 * at config offset 0x50 that is not linked into the capability list,
 * so the core never finds it.  Probe for it, wire up pcie_cap /
 * pcie_flags_reg / pcie_mpss by hand, size the config space, and
 * register a saved-state area so the PCIe control registers survive
 * suspend/resume.
 */
static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
{
	int pos, i = 0;
	u8 next_cap;
	u16 reg16, *cap;
	struct pci_cap_saved_state *state;

	/* Nothing to do if a PCIe capability was discovered normally */
	if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
		return;

	/* Quirk expects MSI to be present... */
	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	/* ...and to be the last entry in the capability list */
	pci_read_config_byte(pdev, pos + 1, &next_cap);
	if (next_cap)
		return;

	/*
	 * Probe offset 0x50 for the unlinked PCIe capability:
	 * capability ID PCI_CAP_ID_EXP with a zero "next" pointer.
	 */
	pos = 0x50;
	pci_read_config_word(pdev, pos, &reg16);
	if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
		u32 status;
#ifndef PCI_EXP_SAVE_REGS
#define PCI_EXP_SAVE_REGS 7
#endif
		/* One u16 save slot per PCIe control register below */
		int size = PCI_EXP_SAVE_REGS * sizeof(u16);

		pdev->pcie_cap = pos;
		pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
		pdev->pcie_flags_reg = reg16;
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
		pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

		/* Assume extended config space unless reads beyond 0x100 fail */
		pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
		if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
		    PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
			pdev->cfg_size = PCI_CFG_SPACE_SIZE;

		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
			return;

		/* Register a save area so suspend/resume preserves these regs */
		state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
		if (!state)
			return;

		state->cap.cap_nr = PCI_CAP_ID_EXP;
		state->cap.cap_extended = 0;
		state->cap.size = size;
		cap = (u16 *)&state->cap.data[0];
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
		hlist_add_head(&state->next, &pdev->saved_cap_space);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
/* Prevent use of Function Level Reset on the listed devices */
static void quirk_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
5181
/*
 * These devices cannot handle PCIe Extended Tags: disable them for the
 * whole host bridge and re-run tag configuration on every device below
 * it so siblings stop emitting them too.
 */
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);

	if (!bridge)
		return;

	bridge->no_ext_tags = 1;
	pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");

	/* Reconfigure all devices under this bridge with the new setting */
	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
5201
#ifdef CONFIG_PCI_ATS
/*
 * Disable ATS on certain AMD/ATI GPUs by zeroing ats_cap before the
 * core uses it.  For devices 0x7312 and 0x7340 only specific revisions
 * are affected; all other listed device IDs are disabled outright.
 */
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
	/* Exempt the revisions that are known-good for these two IDs */
	if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
	    (pdev->device == 0x7340 && pdev->revision != 0xc5))
		return;

	pci_info(pdev, "disabling ATS\n");
	pdev->ats_cap = 0;
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif
5227
5228
/* Freescale Root Ports: mark MSI as unusable */
static void quirk_fsl_no_msi(struct pci_dev *pdev)
{
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
		pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
5235
5236
5237
5238
5239
5240
5241
5242
5243
/*
 * pci_create_device_link - tie a GPU sub-function's power state to the GPU
 * @pdev: candidate consumer device (quirk may run on any function)
 * @consumer: function number the link should be created for
 * @supplier: function number of the supplier in the same slot
 * @class: class the supplier must have after shifting
 * @class_shift: right-shift applied to the supplier's class before compare
 *
 * Adds a stateless runtime-PM device link from @pdev to the supplier
 * function and then allows runtime PM on @pdev.
 */
static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
				   unsigned int supplier, unsigned int class,
				   unsigned int class_shift)
{
	struct pci_dev *supplier_pdev;

	/* Only act when invoked on the intended consumer function */
	if (PCI_FUNC(pdev->devfn) != consumer)
		return;

	supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
			pdev->bus->number,
			PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
	if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
		/* pci_dev_put(NULL) is a no-op, so this is safe either way */
		pci_dev_put(supplier_pdev);
		return;
	}

	if (device_link_add(&pdev->dev, &supplier_pdev->dev,
			    DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
		pci_info(pdev, "D0 power state depends on %s\n",
			 pci_name(supplier_pdev));
	else
		pci_err(pdev, "Cannot enforce power dependency on %s\n",
			pci_name(supplier_pdev));

	pm_runtime_allow(&pdev->dev);
	pci_dev_put(supplier_pdev);
}
5272
5273
5274
5275
5276
/* Make a GPU's HDA audio function (function 1) depend on the GPU (function 0) */
static void quirk_gpu_hda(struct pci_dev *hda)
{
	pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
5287
5288
5289
5290
5291
/* Make an NVIDIA GPU's USB function (function 2) depend on the GPU */
static void quirk_gpu_usb(struct pci_dev *usb)
{
	pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
5298
5299
5300
5301
5302
5303
5304
/* The UCSI function reports class 0x0c80 ("other serial bus controller") */
#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
/* Make an NVIDIA GPU's USB Type-C UCSI function (function 3) depend on the GPU */
static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
{
	pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_UNKNOWN, 8,
			      quirk_gpu_usb_typec_ucsi);
5313
5314
5315
5316
5317
/*
 * Some NVIDIA GPUs hide their HDA audio function behind bit 25 of the
 * config register at 0x488.  Set the bit so the function enumerates,
 * and refresh the multifunction flag afterwards.  Runs on header fixup
 * and again on resume (firmware may clear the bit across suspend).
 */
static void quirk_nvidia_hda(struct pci_dev *gpu)
{
	u8 hdr_type;
	u32 val;

	/* Older GPUs (below the GeForce 320M device ID) are not affected */
	if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
		return;

	/* Nothing to do if the HDA function is already exposed */
	pci_read_config_dword(gpu, 0x488, &val);
	if (val & BIT(25))
		return;

	pci_info(gpu, "Enabling HDA controller\n");
	pci_write_config_dword(gpu, 0x488, val | BIT(25));

	/* The device just became multifunction; re-read the header type */
	pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
	gpu->multifunction = !!(hdr_type & 0x80);
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			       PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
				     PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
/*
 * pci_idt_bus_quirk - vendor-ID probe workaround for IDT switches
 * @bus: bus being enumerated
 * @devfn: devfn of the device being probed
 * @l: out: vendor/device ID dword read from the device
 * @timeout: timeout passed through to the generic read
 *
 * Temporarily disables ACS Source Validation on the upstream bridge
 * around the vendor-ID probe, then restores it.  On a successful
 * probe it writes 0 to the (read-only) Vendor ID register —
 * presumably to clear state latched by the switch; verify against the
 * IDT erratum this works around.  Returns the probe result.
 */
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
{
	int pos;
	u16 ctrl = 0;
	bool found;
	struct pci_dev *bridge = bus->self;

	pos = bridge->acs_cap;

	/* Disable Source Validation for the duration of the probe */
	if (pos) {
		pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
		if (ctrl & PCI_ACS_SV)
			pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
					      ctrl & ~PCI_ACS_SV);
	}

	found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);

	if (found)
		pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);

	/* Restore Source Validation if we turned it off above */
	if (ctrl & PCI_ACS_SV)
		pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);

	return found;
}
5398
5399
5400
5401
5402
5403
5404
5405
/*
 * Microsemi Switchtec NTB forwards TLPs between partitions using proxy
 * requester IDs.  Read each peer partition's requester-ID table from
 * the NTB control registers in BAR 0 and register a DMA alias for
 * every proxy devfn, so the IOMMU accepts DMA arriving under those IDs.
 */
static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
{
	void __iomem *mmio;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	u64 partition_map;
	u8 partition;
	int pp;

	if (pci_enable_device(pdev)) {
		pci_err(pdev, "Cannot enable Switchtec device\n");
		return;
	}

	mmio = pci_iomap(pdev, 0, 0);
	if (mmio == NULL) {
		pci_disable_device(pdev);
		pci_err(pdev, "Cannot iomap Switchtec device\n");
		return;
	}

	pci_info(pdev, "Setting Switchtec proxy ID aliases\n");

	mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
	mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;

	partition = ioread8(&mmio_ntb->partition_id);

	/* 64-bit endpoint map read as two dwords; skip our own partition */
	partition_map = ioread32(&mmio_ntb->ep_map);
	partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
	partition_map &= ~(1ULL << partition);

	for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
		struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
		u32 table_sz = 0;
		int te;

		if (!(partition_map & (1ULL << pp)))
			continue;

		pci_dbg(pdev, "Processing partition %d\n", pp);

		mmio_peer_ctrl = &mmio_ctrl[pp];

		table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
		if (!table_sz) {
			pci_warn(pdev, "Partition %d table_sz 0\n", pp);
			continue;
		}

		/* Reject implausibly large tables read from hardware */
		if (table_sz > 512) {
			pci_warn(pdev,
				 "Invalid Switchtec partition %d table_sz %d\n",
				 pp, table_sz);
			continue;
		}

		for (te = 0; te < table_sz; te++) {
			u32 rid_entry;
			u8 devfn;

			/* Bits 8:1 of each entry hold the proxy devfn */
			rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
			devfn = (rid_entry >> 1) & 0xFF;
			pci_dbg(pdev,
				"Aliasing Partition %d Proxy ID %02x.%d\n",
				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
			pci_add_dma_alias(pdev, devfn, 1);
		}
	}

	pci_iounmap(pdev, mmio);
	pci_disable_device(pdev);
}
/*
 * Register quirk_switchtec_ntb_dma_alias() for one Switchtec device ID;
 * matches on class BRIDGE_OTHER (the NT endpoint function).
 */
#define SWITCHTEC_QUIRK(vid) \
	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)

/* 0x85xx family */
SWITCHTEC_QUIRK(0x8531);
SWITCHTEC_QUIRK(0x8532);
SWITCHTEC_QUIRK(0x8533);
SWITCHTEC_QUIRK(0x8534);
SWITCHTEC_QUIRK(0x8535);
SWITCHTEC_QUIRK(0x8536);
SWITCHTEC_QUIRK(0x8541);
SWITCHTEC_QUIRK(0x8542);
SWITCHTEC_QUIRK(0x8543);
SWITCHTEC_QUIRK(0x8544);
SWITCHTEC_QUIRK(0x8545);
SWITCHTEC_QUIRK(0x8546);
SWITCHTEC_QUIRK(0x8551);
SWITCHTEC_QUIRK(0x8552);
SWITCHTEC_QUIRK(0x8553);
SWITCHTEC_QUIRK(0x8554);
SWITCHTEC_QUIRK(0x8555);
SWITCHTEC_QUIRK(0x8556);
SWITCHTEC_QUIRK(0x8561);
SWITCHTEC_QUIRK(0x8562);
SWITCHTEC_QUIRK(0x8563);
SWITCHTEC_QUIRK(0x8564);
SWITCHTEC_QUIRK(0x8565);
SWITCHTEC_QUIRK(0x8566);
SWITCHTEC_QUIRK(0x8571);
SWITCHTEC_QUIRK(0x8572);
SWITCHTEC_QUIRK(0x8573);
SWITCHTEC_QUIRK(0x8574);
SWITCHTEC_QUIRK(0x8575);
SWITCHTEC_QUIRK(0x8576);
/* 0x4xxx family */
SWITCHTEC_QUIRK(0x4000);
SWITCHTEC_QUIRK(0x4084);
SWITCHTEC_QUIRK(0x4068);
SWITCHTEC_QUIRK(0x4052);
SWITCHTEC_QUIRK(0x4036);
SWITCHTEC_QUIRK(0x4028);
SWITCHTEC_QUIRK(0x4100);
SWITCHTEC_QUIRK(0x4184);
SWITCHTEC_QUIRK(0x4168);
SWITCHTEC_QUIRK(0x4152);
SWITCHTEC_QUIRK(0x4136);
SWITCHTEC_QUIRK(0x4128);
SWITCHTEC_QUIRK(0x4200);
SWITCHTEC_QUIRK(0x4284);
SWITCHTEC_QUIRK(0x4268);
SWITCHTEC_QUIRK(0x4252);
SWITCHTEC_QUIRK(0x4236);
SWITCHTEC_QUIRK(0x4228);
5531
5532
5533
5534
5535
5536
5537
/*
 * PLX NTB devices can emit DMA under any devfn in their proxy ID
 * range; alias the entire devfn space (0-255) to this device.
 */
static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
{
	pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");

	pci_add_dma_alias(pdev, 0, 256);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
5546
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558
5559
5560
5561
5562
/*
 * On the Lenovo ThinkPad P50 (subsystem 0x222e), EFI firmware can
 * leave the NVIDIA GPU initialized.  If the MMIO state says so, reset
 * the bus to bring the GPU back to a clean state before the driver
 * takes over.  Only runs when the device is resettable.
 */
static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
{
	void __iomem *map;
	int ret;

	if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
	    pdev->subsystem_device != 0x222e ||
	    !pdev->reset_fn)
		return;

	if (pci_enable_device_mem(pdev))
		return;

	/* Map enough of BAR 0 to reach the status register at 0x2240c */
	map = pci_iomap(pdev, 0, 0x23000);
	if (!map) {
		pci_err(pdev, "Can't map MMIO space\n");
		goto out_disable;
	}

	/* Bit 1 at 0x2240c indicates the GPU was left initialized */
	if (ioread32(map + 0x2240c) & 0x2) {
		pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
		ret = pci_reset_bus(pdev);
		if (ret < 0)
			pci_err(pdev, "Failed to reset GPU: %d\n", ret);
	}

	iounmap(map);
out_disable:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
			      PCI_CLASS_DISPLAY_VGA, 8,
			      quirk_reset_lenovo_thinkpad_p50_nvgpu);
5604
5605
5606
5607
5608
/* ASMedia 0x2142: PME# does not work from D0; drop D0 from pme_support */
static void pci_fixup_no_d0_pme(struct pci_dev *dev)
{
	pci_info(dev, "PME# does not work under D0, disabling it\n");
	dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
5615
5616
5617
5618
5619
5620
/* Pericom 0x400e/0x400f: PME# is unreliable; disable it entirely */
static void pci_fixup_no_pme(struct pci_dev *dev)
{
	pci_info(dev, "PME# is unreliable, disabling it\n");
	dev->pme_support = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
5628
/*
 * The Apex device (0x1ac1:0x089a) reports class PCI_CLASS_NOT_DEFINED
 * (the fixup below only matches when class >> 8 == 0), so OR-ing in
 * SYSTEM_OTHER sets the class code while preserving the original
 * programming-interface byte.
 */
static void apex_pci_fixup_class(struct pci_dev *pdev)
{
	pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
			       PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
5635