1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/export.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/acpi.h>
22#include <linux/dmi.h>
23#include <linux/pci-aspm.h>
24#include <linux/ioport.h>
25#include <linux/sched.h>
26#include <linux/ktime.h>
27#include <linux/mm.h>
28#include <linux/nvme.h>
29#include <linux/platform_data/x86/apple.h>
30#include <linux/pm_runtime.h>
31#include <linux/switchtec.h>
32#include <asm/dma.h>
33#include "pci.h"
34
35static ktime_t fixup_debug_start(struct pci_dev *dev,
36 void (*fn)(struct pci_dev *dev))
37{
38 if (initcall_debug)
39 pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));
40
41 return ktime_get();
42}
43
44static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
45 void (*fn)(struct pci_dev *dev))
46{
47 ktime_t delta, rettime;
48 unsigned long long duration;
49
50 rettime = ktime_get();
51 delta = ktime_sub(rettime, calltime);
52 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
53 if (initcall_debug || duration > 10000)
54 pci_info(dev, "%pS took %lld usecs\n", fn, duration);
55}
56
/*
 * Walk the fixup table [f, end) and invoke every entry whose
 * class/vendor/device triple matches @dev, timing each hook via the
 * fixup_debug_* helpers.
 */
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
			  struct pci_fixup *end)
{
	ktime_t calltime;

	for (; f < end; f++)
		if ((f->class == (u32) (dev->class >> f->class_shift) ||
		     f->class == (u32) PCI_ANY_ID) &&
		    (f->vendor == dev->vendor ||
		     f->vendor == (u16) PCI_ANY_ID) &&
		    (f->device == dev->device ||
		     f->device == (u16) PCI_ANY_ID)) {
			void (*hook)(struct pci_dev *dev);
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
			/* Hook stored as a 32-bit self-relative offset */
			hook = offset_to_ptr(&f->hook_offset);
#else
			hook = f->hook;
#endif
			calltime = fixup_debug_start(dev, hook);
			hook(dev);
			fixup_debug_report(dev, calltime, hook);
		}
}
80
81extern struct pci_fixup __start_pci_fixups_early[];
82extern struct pci_fixup __end_pci_fixups_early[];
83extern struct pci_fixup __start_pci_fixups_header[];
84extern struct pci_fixup __end_pci_fixups_header[];
85extern struct pci_fixup __start_pci_fixups_final[];
86extern struct pci_fixup __end_pci_fixups_final[];
87extern struct pci_fixup __start_pci_fixups_enable[];
88extern struct pci_fixup __end_pci_fixups_enable[];
89extern struct pci_fixup __start_pci_fixups_resume[];
90extern struct pci_fixup __end_pci_fixups_resume[];
91extern struct pci_fixup __start_pci_fixups_resume_early[];
92extern struct pci_fixup __end_pci_fixups_resume_early[];
93extern struct pci_fixup __start_pci_fixups_suspend[];
94extern struct pci_fixup __end_pci_fixups_suspend[];
95extern struct pci_fixup __start_pci_fixups_suspend_late[];
96extern struct pci_fixup __end_pci_fixups_suspend_late[];
97
98static bool pci_apply_fixup_final_quirks;
99
/**
 * pci_fixup_device - run all registered quirks of one pass on a device
 * @pass: which fixup phase to run (early/header/final/enable/resume/
 *        resume_early/suspend/suspend_late)
 * @dev:  the device to apply matching fixups to
 *
 * Selects the linker-assembled fixup table for @pass and hands it to
 * pci_do_fixups().  The "final" pass is a no-op until
 * pci_apply_final_quirks() has set pci_apply_fixup_final_quirks, so
 * final quirks only run once at the fs_initcall stage.
 */
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
	struct pci_fixup *start, *end;

	switch (pass) {
	case pci_fixup_early:
		start = __start_pci_fixups_early;
		end = __end_pci_fixups_early;
		break;

	case pci_fixup_header:
		start = __start_pci_fixups_header;
		end = __end_pci_fixups_header;
		break;

	case pci_fixup_final:
		if (!pci_apply_fixup_final_quirks)
			return;
		start = __start_pci_fixups_final;
		end = __end_pci_fixups_final;
		break;

	case pci_fixup_enable:
		start = __start_pci_fixups_enable;
		end = __end_pci_fixups_enable;
		break;

	case pci_fixup_resume:
		start = __start_pci_fixups_resume;
		end = __end_pci_fixups_resume;
		break;

	case pci_fixup_resume_early:
		start = __start_pci_fixups_resume_early;
		end = __end_pci_fixups_resume_early;
		break;

	case pci_fixup_suspend:
		start = __start_pci_fixups_suspend;
		end = __end_pci_fixups_suspend;
		break;

	case pci_fixup_suspend_late:
		start = __start_pci_fixups_suspend_late;
		end = __end_pci_fixups_suspend_late;
		break;

	default:
		/* Unknown pass value: nothing to do */
		return;
	}
	pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
154
/*
 * Run all "final" quirks once every device has been probed, then settle
 * on a system-wide PCI cache line size: keep the value if the arch set
 * one; otherwise use the value all devices agree on, falling back to
 * the arch default (pci_dfl_cache_line_size) on any mismatch.
 */
static int __init pci_apply_final_quirks(void)
{
	struct pci_dev *dev = NULL;
	u8 cls = 0;	/* first non-zero CLS seen across devices */
	u8 tmp;

	if (pci_cache_line_size)
		pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);

	/* From here on, pci_fixup_device(pci_fixup_final, ...) is live */
	pci_apply_fixup_final_quirks = true;
	for_each_pci_dev(dev) {
		pci_fixup_device(pci_fixup_final, dev);

		/*
		 * If the arch hasn't set a CLS yet, look for a value all
		 * devices agree on; any disagreement forces the default.
		 */
		if (!pci_cache_line_size) {
			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
			if (!cls)
				cls = tmp;
			if (!tmp || cls == tmp)
				continue;

			pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
			         cls << 2, tmp << 2,
			         pci_dfl_cache_line_size << 2);
			pci_cache_line_size = pci_dfl_cache_line_size;
		}
	}

	if (!pci_cache_line_size) {
		pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
			pci_dfl_cache_line_size << 2);
		/* Use the agreed value, or the default if nothing reported */
		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
	}

	return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
195
196
197
198
199
200
201
202static void quirk_mmio_always_on(struct pci_dev *dev)
203{
204 dev->mmio_always_on = 1;
205}
206DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
207 PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
208
209
210
211
212
213
214static void quirk_mellanox_tavor(struct pci_dev *dev)
215{
216 dev->broken_parity_status = 1;
217}
218DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
219DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
220
221
222
223
224
/*
 * Triggered by the 82441 host bridge (also on resume): find every
 * PIIX3 (82371SB_0) ISA bridge in the system and make sure its
 * "Passive Release" bit (config reg 0x82, bit 1) is enabled so PCI
 * and ISA traffic can interleave.
 */
static void quirk_passive_release(struct pci_dev *dev)
{
	struct pci_dev *d = NULL;
	unsigned char dlc;

	/*
	 * Iterate over all PIIX3 ISA bridges; pci_get_device() both
	 * releases the previous reference and takes the next one.
	 */
	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
		pci_read_config_byte(d, 0x82, &dlc);
		if (!(dlc & 1<<1)) {
			pci_info(d, "PIIX3: Enabling Passive Release\n");
			dlc |= 1<<1;
			pci_write_config_byte(d, 0x82, dlc);
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
245
246
247
248
249
250
251
252
253
254static void quirk_isa_dma_hangs(struct pci_dev *dev)
255{
256 if (!isa_dma_bridge_buggy) {
257 isa_dma_bridge_buggy = 1;
258 pci_info(dev, "Activating ISA DMA hang workarounds\n");
259 }
260}
261
262
263
264
265DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
266DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
267DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
268DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
269DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
270DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
271DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
272
273
274
275
276
/*
 * Intel TigerPoint LPC: firmware can leave BM_STS (bit 4 of the
 * PM1a_STS I/O register) set; clear it by writing the bit back
 * (the register is write-1-to-clear).  Flagged as FW_BUG.
 */
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
	u32 pmbase;
	u16 pm1a;

	/* PM base lives in config reg 0x40; mask off low control bits */
	pci_read_config_dword(dev, 0x40, &pmbase);
	pmbase = pmbase & 0xff80;
	pm1a = inw(pmbase);	/* PM1a status register */

	if (pm1a & 0x10) {	/* BM_STS still set? */
		pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
		outw(0x10, pmbase);	/* write-1-to-clear */
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
292
293
294static void quirk_nopcipci(struct pci_dev *dev)
295{
296 if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
297 pci_info(dev, "Disabling direct PCI/PCI transfers\n");
298 pci_pci_problems |= PCIPCI_FAIL;
299 }
300}
301DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
302DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
303
304static void quirk_nopciamd(struct pci_dev *dev)
305{
306 u8 rev;
307 pci_read_config_byte(dev, 0x08, &rev);
308 if (rev == 0x13) {
309
310 pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
311 pci_pci_problems |= PCIAGP_FAIL;
312 }
313}
314DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
315
316
317static void quirk_triton(struct pci_dev *dev)
318{
319 if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
320 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
321 pci_pci_problems |= PCIPCI_TRITON;
322 }
323}
324DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
325DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
326DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
327DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
328
329
330
331
332
333
334
335
336
337
338
/*
 * VIA 8363/8371/8361 northbridge workaround: when paired with an
 * affected revision of the VT82C686 (rev 0x40-0x42) or VT8231
 * (rev 0x10-0x12) southbridge, tweak the northbridge bus-arbitration
 * register (0x76: clear bit 5, set bit 4).  Re-applied on resume.
 */
static void quirk_vialatency(struct pci_dev *dev)
{
	struct pci_dev *p;
	u8 busarb;

	/*
	 * Only act when a southbridge with an affected revision is
	 * present; first check for a VT82C686...
	 */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
	if (p != NULL) {

		/* Revisions outside 0x40-0x42 don't need the workaround */
		if (p->revision < 0x40 || p->revision > 0x42)
			goto exit;
	} else {
		/* ...otherwise look for a VT8231 rev 0x10-0x12 */
		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
		if (p == NULL)	/* no affected southbridge at all */
			goto exit;

		if (p->revision < 0x10 || p->revision > 0x12)
			goto exit;
	}

	/*
	 * Northbridge arbitration register 0x76: clear bit 5, set
	 * bit 4 (exact bit semantics per VIA — TODO confirm against
	 * the 8363 datasheet).
	 */
	pci_read_config_byte(dev, 0x76, &busarb);

	busarb &= ~(1<<5);
	busarb |= (1<<4);
	pci_write_config_byte(dev, 0x76, busarb);
	pci_info(dev, "Applying VIA southbridge workaround\n");
exit:
	pci_dev_put(p);	/* pci_dev_put(NULL) is a no-op */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
400
401
402static void quirk_viaetbf(struct pci_dev *dev)
403{
404 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
405 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
406 pci_pci_problems |= PCIPCI_VIAETBF;
407 }
408}
409DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
410
411static void quirk_vsfx(struct pci_dev *dev)
412{
413 if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
414 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
415 pci_pci_problems |= PCIPCI_VSFX;
416 }
417}
418DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
419
420
421
422
423
424
425static void quirk_alimagik(struct pci_dev *dev)
426{
427 if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
428 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
429 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
430 }
431}
432DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
433DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
434
435
436static void quirk_natoma(struct pci_dev *dev)
437{
438 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
439 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
440 pci_pci_problems |= PCIPCI_NATOMA;
441 }
442}
443DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
444DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
445DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
446DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
447DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
448DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
449
450
451
452
453
454static void quirk_citrine(struct pci_dev *dev)
455{
456 dev->cfg_size = 0xA0;
457}
458DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
459
460
461
462
463
464static void quirk_nfp6000(struct pci_dev *dev)
465{
466 dev->cfg_size = 0x600;
467}
468DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
469DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
470DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
471DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
472
473
474static void quirk_extend_bar_to_page(struct pci_dev *dev)
475{
476 int i;
477
478 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
479 struct resource *r = &dev->resource[i];
480
481 if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
482 r->end = PAGE_SIZE - 1;
483 r->start = 0;
484 r->flags |= IORESOURCE_UNSET;
485 pci_info(dev, "expanded BAR %d to page size: %pR\n",
486 i, r);
487 }
488 }
489}
490DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
491
492
493
494
495
496static void quirk_s3_64M(struct pci_dev *dev)
497{
498 struct resource *r = &dev->resource[0];
499
500 if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
501 r->flags |= IORESOURCE_UNSET;
502 r->start = 0;
503 r->end = 0x3ffffff;
504 }
505}
506DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
507DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
508
/*
 * Rebuild an I/O BAR whose header-reported size is wrong: re-read the
 * raw BAR @pos, align its base to the true decode @size, and install a
 * fixed, non-relocatable resource for it.  Logged as a firmware bug.
 */
static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
		     const char *name)
{
	u32 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + pos;

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);

	if (!region)
		return;		/* BAR not programmed; nothing to fix */

	res->name = pci_name(dev);
	res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
	res->flags |=
		(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
	region &= ~(size - 1);	/* align base down to the real decode size */

	/* Convert from PCI bus address to resource space */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
		 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
}
535
536
537
538
539
540
541
542
543
544
/*
 * AMD CS5536 ISA bridge: the config header reports wrong BAR sizes.
 * If BAR 0 doesn't show the expected 8 bytes, rebuild BARs 0-2 with
 * their true decode sizes (8, 256 and 64 bytes) via quirk_io().
 */
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
	static char *name = "CS5536 ISA bridge";

	if (pci_resource_len(dev, 0) != 8) {	/* broken header detected */
		quirk_io(dev, 0, 8, name);	/* 8 bytes */
		quirk_io(dev, 1, 256, name);	/* 256 bytes */
		quirk_io(dev, 2, 64, name);	/* 64 bytes */
		pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
			 name);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
558
/*
 * Claim an I/O region the device decodes but does not describe with a
 * BAR: read the base from config register @port, align it to @size,
 * and store/claim it in spare resource slot @nr so the allocator
 * won't place anything else on top of it.
 */
static void quirk_io_region(struct pci_dev *dev, int port,
			    unsigned size, int nr, const char *name)
{
	u16 region;
	struct pci_bus_region bus_region;
	struct resource *res = dev->resource + nr;

	pci_read_config_word(dev, port, &region);
	region &= ~(size - 1);	/* align base down to the decode size */

	if (!region)
		return;		/* region not enabled/programmed */

	res->name = pci_name(dev);
	res->flags = IORESOURCE_IO;

	/* Convert from PCI bus address to resource space */
	bus_region.start = region;
	bus_region.end = region + size - 1;
	pcibios_bus_to_resource(dev->bus, res, &bus_region);

	if (!pci_claim_resource(dev, nr))
		pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
}
583
584
585
586
587
/*
 * ATI RS100 (Radeon IGP) northbridge: reserve the legacy VGA I/O
 * ports so nothing probes them (per the function name, accesses
 * reportedly trigger machine checks — TODO confirm against errata).
 */
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
	pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	/* Best-effort reservation; return values deliberately ignored */
	request_region(0x3b0, 0x0C, "RadeonIGP");
	request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
596
597
598
599
600
601
602
603
604
605
606
607
608static void quirk_amd_nl_class(struct pci_dev *pdev)
609{
610 u32 class = pdev->class;
611
612
613 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
614 pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
615 class, pdev->class);
616}
617DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
618 quirk_amd_nl_class);
619
620
621
622
623
624
625
626
627static void quirk_synopsys_haps(struct pci_dev *pdev)
628{
629 u32 class = pdev->class;
630
631 switch (pdev->device) {
632 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
633 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
634 case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
635 pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
636 pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
637 class, pdev->class);
638 break;
639 }
640}
641DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
642 PCI_CLASS_SERIAL_USB_XHCI, 0,
643 quirk_synopsys_haps);
644
645
646
647
648
649
650
651
652
653
654
/*
 * ALi M7101: the ACPI (reg 0xE0, 64 bytes) and SMBus (reg 0xE2,
 * 32 bytes) I/O regions are not described by BARs; claim both in
 * spare bridge resource slots.
 */
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
661
/*
 * Decode one PIIX4 "device resource" dword describing an I/O range:
 * low 16 bits hold the base, bits 16-19 a size mask.  Do nothing
 * unless all @enable bits are set.  The range is only logged, not
 * reserved.
 */
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;		/* range not enabled */
	mask = (devres >> 16) & 15;
	base = devres & 0xffff;
	size = 16;
	/* Shrink size until it is consistent with the mask bits */
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print the range; eventually it should be
	 * reserved like the quirk_io_region() regions are.
	 */
	base &= -size;	/* align base down to the decoded size */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
}
687
/*
 * Memory-range counterpart of piix4_io_quirk(): the base lives in the
 * high 16 bits and the size mask in bits 0-5 (shifted up by 16).
 * Only logged, not reserved.
 */
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;		/* range not enabled */
	base = devres & 0xffff0000;
	mask = (devres & 0x3f) << 16;
	size = 128 << 16;
	/* Shrink size until it is consistent with the mask bits */
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print the range; eventually it should be
	 * reserved (note: %04x truncates the printed 32-bit addresses).
	 */
	base &= -size;	/* align base down to the decoded size */
	pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
}
713
714
715
716
717
718
719
/*
 * PIIX4 ACPI function: claim the ACPI (reg 0x40) and SMBus (reg 0x90)
 * I/O regions, then log the programmable "device resource" ranges
 * B..J, some of which are gated by enable bits in devres A (0x5c).
 */
static void quirk_piix4_acpi(struct pci_dev *dev)
{
	u32 res_a;

	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");

	/* Device resource A (0x5c) gates some of the other ranges */
	pci_read_config_dword(dev, 0x5c, &res_a);

	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);

	/* Devres E (I/O) and F (memory) only when bit 29 of devres A set */
	if (res_a & (1 << 29)) {
		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
	}

	/* Devres G (I/O) and H (memory) only when bit 30 of devres A set */
	if (res_a & (1 << 30)) {
		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
	}
	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
750
751#define ICH_PMBASE 0x40
752#define ICH_ACPI_CNTL 0x44
753#define ICH4_ACPI_EN 0x10
754#define ICH6_ACPI_EN 0x80
755#define ICH4_GPIOBASE 0x58
756#define ICH4_GPIO_CNTL 0x5c
757#define ICH4_GPIO_EN 0x10
758#define ICH6_GPIOBASE 0x48
759#define ICH6_GPIO_CNTL 0x4c
760#define ICH6_GPIO_EN 0x10
761
762
763
764
765
766
/*
 * ICH4-generation (and older 8018x) LPC bridges: the ACPI/GPIO/TCO
 * region (base in PMBASE, 0x40) and the GPIO region (base in
 * ICH4_GPIOBASE, 0x58) are decoded without BARs.  Claim each region
 * when its enable bit in the corresponding control register is set.
 */
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
	u8 enable;

	/* ACPI/GPIO/TCO block: gated by ICH4_ACPI_EN in ICH_ACPI_CNTL */
	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH4_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH4 ACPI/GPIO/TCO");

	/* GPIO block: gated by ICH4_GPIO_EN in ICH4_GPIO_CNTL */
	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
	if (enable & ICH4_GPIO_EN)
		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
798
/*
 * ICH6+ variant of the ACPI/GPIO region claiming: same PMBASE layout,
 * but different enable bit (ICH6_ACPI_EN) and GPIO base/control
 * registers.  Shared by the ICH6 and ICH7+ LPC quirks below.
 */
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
	u8 enable;

	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH6_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH6 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
	if (enable & ICH6_GPIO_EN)
		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH6 GPIO");
}
813
/*
 * Decode one ICH6 LPC "generic decode" register: bit 0 is the enable,
 * bits 2-15 carry the I/O base.  When @dynsize is set the real decode
 * length depends on mask bits we don't parse here, so the 16-byte
 * minimum is assumed.  The range is only logged, not reserved.
 */
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name, int dynsize)
{
	u32 val;
	u32 size, base;

	pci_read_config_dword(dev, reg, &val);

	/* Range enabled? */
	if (!(val & 1))
		return;
	base = val & 0xfffc;
	if (dynsize) {
		/*
		 * Not exact: the true size is mask-dependent; use the
		 * minimum decode of 16 bytes.
		 */
		size = 16;
	} else {
		size = 128;
	}
	base &= ~(size-1);	/* align base down to the decode size */

	/* Only printed for now; should eventually be reserved */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
845
/* ICH6 LPC: shared ACPI/GPIO claiming plus its two generic IO decodes */
static void quirk_ich6_lpc(struct pci_dev *dev)
{
	/* Shared ACPI/GPIO decode with all ICH6+ parts */
	ich6_lpc_acpi_gpio(dev);

	/* ICH6-specific generic decode registers at 0x84/0x88 */
	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
857
/*
 * Decode one ICH7/ICH8 LPC generic decode register: bit 0 is the
 * enable, bits 2-15 the base, and bits 18-23 an address mask (the
 * low two mask bits are always decoded).  Only logged, not reserved.
 */
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name)
{
	u32 val;
	u32 mask, base;

	pci_read_config_dword(dev, reg, &val);

	/* Range enabled? */
	if (!(val & 1))
		return;

	/* IO base in bits 15:2, mask in bits 23:18 (4-byte granular) */
	base = val & 0xfffc;
	mask = (val >> 16) & 0xfc;
	mask |= 3;	/* low two address bits are always part of the decode */

	/* Only printed for now; should eventually be reserved */
	pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
881
882
/* ICH7/ICH8/ICH9/ICH10 LPC: ACPI/GPIO claiming plus four generic decodes */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
	/* Shared ACPI/GPIO decode with all ICH6+ parts */
	ich6_lpc_acpi_gpio(dev);

	/* ICH7+-style generic decode registers at 0x84..0x90 */
	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
907
908
909
910
911
912static void quirk_vt82c586_acpi(struct pci_dev *dev)
913{
914 if (dev->revision & 0x10)
915 quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
916 "vt82c586 ACPI");
917}
918DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
919
920
921
922
923
924
925
/*
 * VIA VT82C686 ACPI: reuse the 586 handling for the ACPI region, then
 * also claim the hardware-monitor (reg 0x70, 128 bytes) and SMBus
 * (reg 0x90, 16 bytes) regions.
 */
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
	quirk_vt82c586_acpi(dev);

	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
			"vt82c686 HW-mon");

	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
936
937
938
939
940
941
/*
 * VIA VT8235: claim the power-management (reg 0x88, 128 bytes) and
 * SMBus (reg 0xd0, 16 bytes) I/O regions.
 */
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
948
949
950
951
952
/*
 * TI XIO2000a bridge: fast back-to-back transfers on the secondary
 * bus are broken; clear PCI_COMMAND_FAST_BACK on every device behind
 * the bridge.
 */
static void quirk_xio2000a(struct pci_dev *dev)
{
	struct pci_dev *pdev;
	u16 command;

	pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
		pci_read_config_word(pdev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_FAST_BACK)
			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
			quirk_xio2000a);
967
968#ifdef CONFIG_X86_IO_APIC
969
970#include <asm/io_apic.h>
971
972
973
974
975
976
977
978
979static void quirk_via_ioapic(struct pci_dev *dev)
980{
981 u8 tmp;
982
983 if (nr_ioapics < 1)
984 tmp = 0;
985 else
986 tmp = 0x1f;
987
988 pci_info(dev, "%sbling VIA external APIC routing\n",
989 tmp == 0 ? "Disa" : "Ena");
990
991
992 pci_write_config_byte(dev, 0x58, tmp);
993}
994DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
995DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
996
997
998
999
1000
1001
1002
/*
 * VIA VT8237: set the "bypass APIC de-assert message" bit (bit 3 of
 * misc control 2, config reg 0x5B) if the BIOS left it clear.
 * Re-applied on early resume.
 */
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
	u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8

	pci_read_config_byte(dev, 0x5B, &misc_control2);
	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
		pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026static void quirk_amd_ioapic(struct pci_dev *dev)
1027{
1028 if (dev->revision >= 0x02) {
1029 pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
1030 pci_warn(dev, " : booting with the \"noapic\" option\n");
1031 }
1032}
1033DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
1034#endif
1035
1036#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
1037
1038static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
1039{
1040
1041 if (dev->subsystem_device == 0xa118)
1042 dev->sriov->link = dev->devfn;
1043}
1044DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
1045#endif
1046
1047
1048
1049
1050
1051static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
1052{
1053 if (dev->subordinate && dev->revision <= 0x12) {
1054 pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
1055 dev->revision);
1056 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
1057 }
1058}
1059DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
1060
1061
1062
1063
1064
1065
1066
1067
1068static void quirk_via_acpi(struct pci_dev *d)
1069{
1070 u8 irq;
1071
1072
1073 pci_read_config_byte(d, 0x42, &irq);
1074 irq &= 0xf;
1075 if (irq && (irq != 2))
1076 d->irq = irq;
1077}
1078DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
1079DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
1080
1081
/*
 * Slot range of devices on the VIA VLink; -1 for the low bound means
 * "no VLink quirk active" until quirk_via_bridge() identifies a
 * known VIA southbridge.  Used by quirk_via_vlink().
 */
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;

/* Record, per southbridge type, which slots sit on the VLink */
static void quirk_via_bridge(struct pci_dev *dev)
{
	/* See what bridge we have and set the device range accordingly */
	switch (dev->device) {
	case PCI_DEVICE_ID_VIA_82C686:
		/*
		 * On the 686, limit the range to exactly the bridge's
		 * own slot.
		 */
		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
		break;
	case PCI_DEVICE_ID_VIA_8237:
	case PCI_DEVICE_ID_VIA_8237A:
		via_vlink_dev_lo = 15;
		break;
	case PCI_DEVICE_ID_VIA_8235:
		via_vlink_dev_lo = 16;
		break;
	case PCI_DEVICE_ID_VIA_8231:
	case PCI_DEVICE_ID_VIA_8233_0:
	case PCI_DEVICE_ID_VIA_8233A:
	case PCI_DEVICE_ID_VIA_8233C_0:
		via_vlink_dev_lo = 17;
		break;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
/*
 * For devices on the VIA VLink, rewrite PCI_INTERRUPT_LINE so it
 * matches the IRQ the kernel actually assigned.  Runs at enable time
 * on every VIA device; bails unless a VLink bridge was found and the
 * device is a bus-0 device inside the recorded slot range.
 */
static void quirk_via_vlink(struct pci_dev *dev)
{
	u8 irq, new_irq;

	/* No known VIA VLink bridge in the system? */
	if (via_vlink_dev_lo == -1)
		return;

	new_irq = dev->irq;

	/* Only legacy IRQs 1-15 can be programmed into the register */
	if (!new_irq || new_irq > 15)
		return;

	/* Device must be on bus 0 within the VLink slot range */
	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
		return;

	/*
	 * Only rewrite (with a short settle delay) when the register
	 * actually differs from the assigned IRQ.
	 */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	if (new_irq != irq) {
		pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
			irq, new_irq);
		udelay(15);	/* delay before the write — purpose unclear, keep as-is */
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
1164
1165
1166
1167
1168
1169
/*
 * VIA VT82C598 masquerades as a VT82C597 until config register 0xfc
 * is cleared; clear it and re-read the true device ID into the
 * pci_dev (write must happen before the re-read).
 */
static void quirk_vt82c598_id(struct pci_dev *dev)
{
	pci_write_config_byte(dev, 0xfc, 0);
	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
1176
1177
1178
1179
1180
1181
1182
/*
 * Disable the CardBus bridge's 16-bit legacy-mode base register by
 * writing 0 to it, for every CardBus bridge (applied at final fixup
 * and again on early resume so a suspend cycle cannot re-enable it).
 */
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
				     PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
1191
1192
1193
1194
1195
1196
1197
1198
/*
 * AMD FE Gate 700C northbridge: if the BIOS left bits 1:2 of config
 * register 0x4C clear, set them and also set bit 23 of register 0x84.
 * NOTE(review): per the warning text these bits enable PCI-ordering
 * compliance; the precise bit meanings are assumed from that message,
 * not visible here -- confirm against the AMD-751/760 datasheet.
 */
static void quirk_amd_ordering(struct pci_dev *dev)
{
	u32 pcic;
	pci_read_config_dword(dev, 0x4C, &pcic);
	if ((pcic & 6) != 6) {
		pcic |= 6;
		pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
		pci_write_config_dword(dev, 0x4C, pcic);
		pci_read_config_dword(dev, 0x84, &pcic);
		pcic |= (1 << 23);	/* also set bit 23 of register 0x84 */
		pci_write_config_dword(dev, 0x84, pcic);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1214
1215
1216
1217
1218
1219
1220
1221
/*
 * Dunord I-3000: mark BAR 1 as unset and force its range to 16 MB
 * (0..0xffffff) so the PCI core re-allocates it at that size.
 * NOTE(review): presumably the device decodes more address space than
 * the BAR advertises -- confirm against the original changelog.
 */
static void quirk_dunord(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[1];

	r->flags |= IORESOURCE_UNSET;
	r->start = 0;
	r->end = 0xffffff;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1231
1232
1233
1234
1235
1236
/*
 * Mark bridges that forward legacy resources without windows as
 * transparent so the PCI core routes subtractive-decode accesses
 * through them.
 */
static void quirk_transparent_bridge(struct pci_dev *dev)
{
	dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1243
1244
1245
1246
1247
1248
1249
1250static void quirk_mediagx_master(struct pci_dev *dev)
1251{
1252 u8 reg;
1253
1254 pci_read_config_byte(dev, 0x41, ®);
1255 if (reg & 2) {
1256 reg &= ~2;
1257 pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1258 reg);
1259 pci_write_config_byte(dev, 0x41, reg);
1260 }
1261}
1262DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1263DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1264
1265
1266
1267
1268
1269
1270static void quirk_disable_pxb(struct pci_dev *pdev)
1271{
1272 u16 config;
1273
1274 if (pdev->revision != 0x04)
1275 return;
1276 pci_read_config_word(pdev, 0x40, &config);
1277 if (config & (1<<6)) {
1278 config &= ~(1<<6);
1279 pci_write_config_word(pdev, 0x40, config);
1280 pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
1281 }
1282}
1283DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1284DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1285
/*
 * ATI/AMD SATA controllers left in IDE mode (class 0x01): flip them to
 * AHCI by temporarily setting bit 0 of config register 0x40 (which
 * makes the class/prog-if registers writable -- assumed from the
 * write/restore bracketing, confirm against the SB600/SB700 datasheet),
 * writing prog-if 0x01 and subclass 0x06 (AHCI), then restoring 0x40.
 * The cached dev->class is updated to match.
 */
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
	/* Read the subclass byte; only act on IDE-mode (0x01) devices. */
	u8 tmp;

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
	if (tmp == 0x01) {
		pci_read_config_byte(pdev, 0x40, &tmp);
		pci_write_config_byte(pdev, 0x40, tmp | 1);	/* unlock class registers */
		pci_write_config_byte(pdev, 0x9, 1);		/* prog-if: AHCI 1.0 */
		pci_write_config_byte(pdev, 0xa, 6);		/* subclass: SATA */
		pci_write_config_byte(pdev, 0x40, tmp);		/* relock */

		pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
		pci_info(pdev, "set SATA to AHCI mode\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1311
1312
/*
 * ServerWorks CSB5 IDE: clear prog-if bits 0 and 2 (native-mode enables
 * for the two channels) so the controller runs in compatibility mode,
 * keeping both the hardware register and the cached dev->class in sync.
 */
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
	u8 prog;
	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
	if (prog & 5) {
		prog &= ~5;
		pdev->class &= ~5;	/* mirror the change in the cached class */
		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1325
1326
/*
 * Intel 82801CA IDE: if exactly one of the two channels is in native
 * mode (prog-if bits 0 and 4 differ -- the condition below is an XOR of
 * those bits), force both channels to legacy mode by clearing bits 0
 * and 2, in both the register and the cached dev->class.
 */
static void quirk_ide_samemode(struct pci_dev *pdev)
{
	u8 prog;

	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);

	if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
		pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
		prog &= ~5;
		pdev->class &= ~5;
		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1341
1342
/*
 * Some IDE controllers misbehave if put into D3; flag them so the PCI
 * core never places them in that power state.
 */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
/* Applied to all IDE-class devices from these vendors. */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
			      PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
			      PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1359
1360
1361
1362
1363
/*
 * Intel 82375: override the cached class code so the device is treated
 * as an EISA bridge regardless of what its config space reports.
 */
static void quirk_eisa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
/* Set when the host bridge's subsystem IDs identify a board whose BIOS
 * hides the Intel SMBus device; the LPC fixups below then unhide it. */
static int asus_hides_smbus;

/*
 * Recognise boards (by host-bridge device ID + subsystem IDs) whose
 * BIOS hides the Intel SMBus controller, and latch asus_hides_smbus so
 * the companion LPC quirks can re-enable the device.  The subsystem
 * vendor identifies the board maker (ASUS, HP, Samsung, Compaq); each
 * inner switch lists the affected board subsystem-device IDs.
 */
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
			switch (dev->subsystem_device) {
			case 0x8025:
			case 0x8070:
			case 0x8088:
			case 0x1626:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
			switch (dev->subsystem_device) {
			case 0x80b1:
			case 0x80b2:
			case 0x8093:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
			switch (dev->subsystem_device) {
			case 0x8030:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
			switch (dev->subsystem_device) {
			case 0x8070:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
			switch (dev->subsystem_device) {
			case 0x80c9:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
			switch (dev->subsystem_device) {
			case 0x1751:
			case 0x1821:
			case 0x1897:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x184b:
			case 0x186a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x80f2:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
			switch (dev->subsystem_device) {
			case 0x1882:
			case 0x1977:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x088C:
			case 0x0890:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x12bc:
			case 0x12bd:
			case 0x006a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
			switch (dev->subsystem_device) {
			case 0x12bf:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0xC00C:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x0058:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
			switch (dev->subsystem_device) {
			case 0xB16C:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
			switch (dev->subsystem_device) {
			case 0x00b8:
			case 0x00b9:
			case 0x00ba:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
			switch (dev->subsystem_device) {
			case 0x001A:
				asus_hides_smbus = 1;
			}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1529
1530static void asus_hides_smbus_lpc(struct pci_dev *dev)
1531{
1532 u16 val;
1533
1534 if (likely(!asus_hides_smbus))
1535 return;
1536
1537 pci_read_config_word(dev, 0xF2, &val);
1538 if (val & 0x8) {
1539 pci_write_config_word(dev, 0xF2, val & (~0x8));
1540 pci_read_config_word(dev, 0xF2, &val);
1541 if (val & 0x8)
1542 pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1543 val);
1544 else
1545 pci_info(dev, "Enabled i801 SMBus device\n");
1546 }
1547}
1548DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1549DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1550DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1551DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1552DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1553DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1554DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1555DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1556DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1557DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1558DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1559DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1560DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1561DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1562
1563
1564static void __iomem *asus_rcba_base;
1565static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
1566{
1567 u32 rcba;
1568
1569 if (likely(!asus_hides_smbus))
1570 return;
1571 WARN_ON(asus_rcba_base);
1572
1573 pci_read_config_dword(dev, 0xF0, &rcba);
1574
1575 asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
1576 if (asus_rcba_base == NULL)
1577 return;
1578}
1579
/*
 * Early in resume, clear bit 3 of the 32-bit register at RCBA offset
 * 0x3418 (presumably the SMBus function-disable bit -- the late resume
 * handler reports the SMBus device enabled; confirm against the ICH6
 * datasheet).  Requires the mapping set up at suspend time.
 */
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
	u32 val;

	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	/* Read-modify-write: clear only bit 3, preserve the rest. */
	val = readl(asus_rcba_base + 0x3418);
	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
}
1593
/*
 * Late in resume, release the RCBA mapping created at suspend time and
 * report that the SMBus device was re-enabled.
 */
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	iounmap(asus_rcba_base);
	asus_rcba_base = NULL;	/* allow the next suspend to map again */
	pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
}
1603
/*
 * Boot-time variant: run the full suspend/resume-early/resume sequence
 * back-to-back to unhide the SMBus device during initial enumeration.
 */
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
	asus_hides_smbus_lpc_ich6_suspend(dev);
	asus_hides_smbus_lpc_ich6_resume_early(dev);
	asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1614
1615
1616static void quirk_sis_96x_smbus(struct pci_dev *dev)
1617{
1618 u8 val = 0;
1619 pci_read_config_byte(dev, 0x77, &val);
1620 if (val & 0x10) {
1621 pci_info(dev, "Enabling SiS 96x SMBus\n");
1622 pci_write_config_byte(dev, 0x77, val & ~0x10);
1623 }
1624}
1625DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1626DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1627DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1628DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1629DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1630DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1631DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1632DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642#define SIS_DETECT_REGISTER 0x40
1643
1644static void quirk_sis_503(struct pci_dev *dev)
1645{
1646 u8 reg;
1647 u16 devid;
1648
1649 pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®);
1650 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1651 pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1652 if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
1653 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1654 return;
1655 }
1656
1657
1658
1659
1660
1661
1662 dev->device = devid;
1663 quirk_sis_96x_smbus(dev);
1664}
1665DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1666DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1667
1668
1669
1670
1671
1672
1673
1674static void asus_hides_ac97_lpc(struct pci_dev *dev)
1675{
1676 u8 val;
1677 int asus_hides_ac97 = 0;
1678
1679 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1680 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1681 asus_hides_ac97 = 1;
1682 }
1683
1684 if (!asus_hides_ac97)
1685 return;
1686
1687 pci_read_config_byte(dev, 0x50, &val);
1688 if (val & 0xc0) {
1689 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1690 pci_read_config_byte(dev, 0x50, &val);
1691 if (val & 0xc0)
1692 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1693 val);
1694 else
1695 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1696 }
1697}
1698DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1699DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1700
1701#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1702
1703
1704
1705
1706
/*
 * JMicron JMB36x: program the chip-configuration dwords at 0x40 and
 * 0x80 so the SATA/PATA functions appear in the layout the ATA drivers
 * expect, then refresh the cached header type and class code, which
 * change as a result of the reprogramming.
 * NOTE(review): bit meanings of the 0x40/0x80 masks are taken on trust
 * from the per-device constants below -- confirm against the JMicron
 * datasheet before altering them.
 */
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
	u32 conf1, conf5, class;
	u8 hdr;

	/* The configuration registers only exist on function 0. */
	if (PCI_FUNC(pdev->devfn))
		return;

	pci_read_config_dword(pdev, 0x40, &conf1);
	pci_read_config_dword(pdev, 0x80, &conf5);

	conf1 &= ~0x00CFF302;	/* clear the bits programmed per-device below */
	conf5 &= ~(1 << 24);

	switch (pdev->device) {
	case PCI_DEVICE_ID_JMICRON_JMB360:
	case PCI_DEVICE_ID_JMICRON_JMB362:
	case PCI_DEVICE_ID_JMICRON_JMB364:
		conf1 |= 0x0002A100;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB365:
	case PCI_DEVICE_ID_JMICRON_JMB366:
		/* Dual-port parts additionally set bit 24 of conf5. */
		conf5 |= (1 << 24);
		/* fall through - shares the conf1 setting below */
	case PCI_DEVICE_ID_JMICRON_JMB361:
	case PCI_DEVICE_ID_JMICRON_JMB363:
	case PCI_DEVICE_ID_JMICRON_JMB369:
		conf1 |= 0x00C2A1B3;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB368:
		conf1 |= 0x00C00000;
		break;
	}

	pci_write_config_dword(pdev, 0x40, conf1);
	pci_write_config_dword(pdev, 0x80, conf5);

	/* Re-read header type and class: both change with the new config. */
	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
	pdev->hdr_type = hdr & 0x7f;
	pdev->multifunction = !!(hdr & 0x80);

	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
	pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
1778
1779#endif
1780
/*
 * JMicron multifunction storage parts: disable async suspend so the
 * functions power on in a defined order (see the message text).
 */
static void quirk_jmicron_async_suspend(struct pci_dev *dev)
{
	if (dev->multifunction) {
		device_disable_async_suspend(&dev->dev);
		pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
1792
1793#ifdef CONFIG_X86_IO_APIC
/*
 * Intel EESSC (Alder) with base class 0xff: reserve BAR 0 in the iomem
 * tree (presumably it hosts the IOAPIC -- confirm against the original
 * changelog) and wipe the remaining resources so the PCI core cannot
 * move or reassign them.
 */
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
	int i;

	/* Only the variant reporting base class 0xff is affected. */
	if ((pdev->class >> 8) != 0xff00)
		return;

	/* Claim BAR 0 in the resource tree if it is populated. */
	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
		insert_resource(&iomem_resource, &pdev->resource[0]);

	/* Clear every other resource so nothing else gets allocated. */
	for (i = 1; i < 6; i++)
		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
1817#endif
1818
/* Disable MSI on chipset devices where it is known not to work. */
static void quirk_pcie_mch(struct pci_dev *pdev)
{
	pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);

DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1828
1829
1830
1831
1832
/*
 * Intel PXH/PXHD/PXHV bridges: disable MSI (the warning mentions the
 * SHPC device on these parts).
 */
static void quirk_pcie_pxh(struct pci_dev *dev)
{
	dev->no_msi = 1;
	pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
1843
1844
1845
1846
1847
/*
 * Certain Intel PCIe ports: raise the global D3 transition delay to
 * 120 ms and forbid the D1/D2 power states on the port itself.
 * Note pci_pm_d3_delay is a file/global setting, so any one matching
 * device raises the delay system-wide.
 */
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
	pci_pm_d3_delay = 120;
	dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1874
1875static void quirk_radeon_pm(struct pci_dev *dev)
1876{
1877 if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1878 dev->subsystem_device == 0x00e2) {
1879 if (dev->d3_delay < 20) {
1880 dev->d3_delay = 20;
1881 pci_info(dev, "extending delay after power-on from D3 to %d msec\n",
1882 dev->d3_delay);
1883 }
1884 }
1885}
1886DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
1887
1888#ifdef CONFIG_X86_IO_APIC
/* DMI callback: set the global noioapicreroute flag for matched boards. */
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
	noioapicreroute = 1;
	pr_info("%s detected: disable boot interrupt reroute\n", d->ident);

	return 0;
}
1896
/*
 * Boards on which boot-interrupt rerouting must be disabled; matched
 * via DMI in quirk_reroute_to_boot_interrupts_intel() below.
 */
static const struct dmi_system_id boot_interrupt_dmi_table[] = {
	{
		.callback = dmi_disable_ioapicreroute,
		.ident = "ASUSTek Computer INC. M2N-LR",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
		},
	},
	{}	/* terminator */
};
1911
1912
1913
1914
1915
1916
1917
/*
 * Mark Intel chipset devices whose interrupts should be rerouted to
 * the boot interrupt, unless disabled on the command line
 * (noioapicquirk/noioapicreroute) or by the DMI blacklist above.
 */
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
	/* May set noioapicreroute via dmi_disable_ioapicreroute(). */
	dmi_check_system(boot_interrupt_dmi_table);
	if (noioapicquirk || noioapicreroute)
		return;

	dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
	pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
/* Intel 6300ESB: bit 14 of the ABAR config word disables boot interrupts. */
#define INTEL_6300_IOAPIC_ABAR		0x40
#define INTEL_6300_DISABLE_BOOT_IRQ	(1<<14)

/*
 * Disable boot interrupts on the Intel ESB_10 device by setting the
 * disable bit in the ABAR register (skipped with noioapicquirk).
 */
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;

	if (noioapicquirk)
		return;

	pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
	pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
	pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1973
1974
/* Broadcom/ServerWorks HT1000: feature register plus indexed PIC map ports. */
#define BC_HT1000_FEATURE_REG		0x64
#define BC_HT1000_PIC_REGS_ENABLE	(1<<0)
#define BC_HT1000_MAP_IDX		0xC00
#define BC_HT1000_MAP_DATA		0xC01

/*
 * Disable boot interrupts on the HT1000 southbridge: temporarily
 * expose the PIC mapping registers via the feature register, zero the
 * 32 map entries starting at index 0x10 through the index/data port
 * pair, then restore the feature register.  The sequence is
 * order-sensitive (enable -> program -> restore).
 */
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;
	u8 irq;

	if (noioapicquirk)
		return;

	pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
			       BC_HT1000_PIC_REGS_ENABLE);

	/* Zero map entries 0x10..0x2f via the index/data I/O ports. */
	for (irq = 0x10; irq < 0x10 + 32; irq++) {
		outb(irq, BC_HT1000_MAP_IDX);
		outb(0x00, BC_HT1000_MAP_DATA);
	}

	/* Restore the feature register to its original value. */
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
2004
2005
2006
2007
2008
2009
2010
2011
/* AMD 8131/8132: misc register, its NOIOAMODE bit, and exempt revisions. */
#define AMD_813X_MISC			0x40
#define AMD_813X_NOIOAMODE		(1<<0)
#define AMD_813X_REV_B1			0x12
#define AMD_813X_REV_B2			0x13

/*
 * Disable boot interrupts on AMD 813x bridges by clearing NOIOAMODE in
 * the misc register.  B1/B2 revisions are skipped (presumably they do
 * not need -- or do not tolerate -- the change; confirm against the
 * errata).
 */
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;

	if (noioapicquirk)
		return;
	if ((dev->revision == AMD_813X_REV_B1) ||
	    (dev->revision == AMD_813X_REV_B2))
		return;

	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
	pci_config_dword &= ~AMD_813X_NOIOAMODE;
	pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2038
#define AMD_8111_PCI_IRQ_ROUTING	0x56

/*
 * Disable boot interrupts on the AMD 8111 SMBus device by zeroing its
 * PCI IRQ routing register; a register already reading zero means they
 * are already off.
 */
static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;

	if (noioapicquirk)
		return;

	pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
	if (!pci_config_word) {
		pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
			 dev->vendor, dev->device);
		return;
	}
	pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2060#endif
2061
2062
2063
2064
2065
2066
/*
 * Toshiba TC86C001 IDE: if BAR 0 landed on an address with bit 3 set,
 * mark it unset and force a 16-byte range so the core re-allocates it
 * at an aligned address (presumably the chip mis-decodes when bit 3 is
 * set -- confirm against the original changelog).
 */
static void quirk_tc86c001_ide(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];

	if (r->start & 0x8) {
		r->flags |= IORESOURCE_UNSET;
		r->start = 0;
		r->end = 0xf;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
			 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
			 quirk_tc86c001_ide);
2080
2081
2082
2083
2084
2085
2086
2087
/*
 * PLX PCI 9050 (revisions < 2): if BAR 0 or BAR 1 is 128 bytes long
 * and was assigned at an address with bit 7 set, mark it unset and
 * force a 256-byte range so re-allocation lands on a bit-7-clear
 * address, avoiding the chip's decoding bug at such addresses.
 */
static void quirk_plx_pci9050(struct pci_dev *dev)
{
	unsigned int bar;

	/* Revision 2 and later parts are fixed. */
	if (dev->revision >= 2)
		return;
	for (bar = 0; bar <= 1; bar++)
		if (pci_resource_len(dev, bar) == 0x80 &&
		    (pci_resource_start(dev, bar) & 0x80)) {
			struct resource *r = &dev->resource[bar];
			pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
				 bar);
			r->flags |= IORESOURCE_UNSET;
			r->start = 0;
			r->end = 0xff;	/* 256-byte window guarantees bit-7-clear start */
		}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
			 quirk_plx_pci9050);
/*
 * Boards (vendor 0x1402) that embed the 9050 under their own IDs get
 * the same fixup.
 */
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
2119
/*
 * Netmos serial/parallel combo cards encode their port layout in the
 * subsystem device ID: bits 7:4 give the parallel port count and bits 3:0
 * the serial port count.  When a card has at least one parallel port,
 * change its class from SERIAL to OTHER so that parport_serial (rather
 * than a plain serial driver) claims it -- see the log message below.
 */
static void quirk_netmos(struct pci_dev *dev)
{
	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
	unsigned int num_serial = dev->subsystem_device & 0xf;

	switch (dev->device) {
	case PCI_DEVICE_ID_NETMOS_9835:
		/* The IBM-branded 9835 (subsystem 0x0299) is exempt. */
		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
				dev->subsystem_device == 0x0299)
			return;
		/* fall through */
	case PCI_DEVICE_ID_NETMOS_9735:
	case PCI_DEVICE_ID_NETMOS_9745:
	case PCI_DEVICE_ID_NETMOS_9845:
	case PCI_DEVICE_ID_NETMOS_9855:
		if (num_parallel) {
			pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
				 dev->device, num_parallel, num_serial);
			/* Keep the low byte (programming interface) intact. */
			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
			    (dev->class & 0xff);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
2156
/*
 * Some firmware leaves the e100 NIC with interrupts enabled; if so, mask
 * them via the device's CSR so a stuck interrupt line cannot wedge the
 * system before the driver loads.  Only known e100 device IDs are touched.
 */
static void quirk_e100_interrupt(struct pci_dev *dev)
{
	u16 command, pmcsr;
	u8 __iomem *csr;
	u8 cmd_hi;

	switch (dev->device) {
	/* Device IDs covered by this quirk (e100 family). */
	case 0x1029:
	case 0x1030 ... 0x1034:
	case 0x1038 ... 0x103E:
	case 0x1050 ... 0x1057:
	case 0x1059:
	case 0x1064 ... 0x106B:
	case 0x1091 ... 0x1095:
	case 0x1209:
	case 0x1229:
	case 0x2449:
	case 0x2459:
	case 0x245D:
	case 0x27DC:
		break;
	default:
		return;
	}

	/*
	 * Touch the CSR only when memory decode is enabled and BAR 0 has
	 * actually been assigned; otherwise the ioremap'd access below
	 * would not reach the device.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &command);

	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
		return;

	/*
	 * Skip devices not in D0: MMIO accesses to a powered-down device
	 * are not safe.
	 */
	if (dev->pm_cap) {
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
			return;
	}

	/* Map just the first 8 bytes of the CSR space. */
	csr = ioremap(pci_resource_start(dev, 0), 8);
	if (!csr) {
		pci_warn(dev, "Can't map e100 registers\n");
		return;
	}

	/*
	 * Byte 3 of the CSR is the interrupt mask: 0 means interrupts are
	 * enabled; writing 1 masks them.
	 */
	cmd_hi = readb(csr + 3);
	if (cmd_hi == 0) {
		pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
		writeb(1, csr + 3);
	}

	iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
2222
2223
2224
2225
2226
/*
 * Disable ASPM L0s for devices where it is known to be problematic.
 * NOTE(review): the device list below covers Intel 82575/82598-era NICs --
 * confirm the exact hardware reason against the original changelogs.
 */
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
	pci_info(dev, "Disabling L0s\n");
	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2246
2247
2248
2249
2250
2251
2252
2253
2254
/*
 * Flag devices whose Retrain Link bit must be cleared manually by the
 * ASPM code (dev->clear_retrain_link is consumed elsewhere in the PCI
 * core).  NOTE(review): presumably these Pericom bridges do not clear the
 * bit themselves after retraining -- confirm against the errata.
 */
static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
{
	dev->clear_retrain_link = 1;
	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
}
DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
2263
2264static void fixup_rev1_53c810(struct pci_dev *dev)
2265{
2266 u32 class = dev->class;
2267
2268
2269
2270
2271
2272 if (class)
2273 return;
2274
2275 dev->class = PCI_CLASS_STORAGE_SCSI << 8;
2276 pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
2277 class, dev->class);
2278}
2279DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
2280
2281
2282static void quirk_p64h2_1k_io(struct pci_dev *dev)
2283{
2284 u16 en1k;
2285
2286 pci_read_config_word(dev, 0x40, &en1k);
2287
2288 if (en1k & 0x200) {
2289 pci_info(dev, "Enable I/O Space to 1KB granularity\n");
2290 dev->io_window_1k = 1;
2291 }
2292}
2293DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2294
2295
2296
2297
2298
2299
2300static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
2301{
2302 uint8_t b;
2303
2304 if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
2305 if (!(b & 0x20)) {
2306 pci_write_config_byte(dev, 0xf41, b | 0x20);
2307 pci_info(dev, "Linking AER extended capability\n");
2308 }
2309 }
2310}
2311DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2312 quirk_nvidia_ck804_pcie_aer_ext_cap);
2313DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2314 quirk_nvidia_ck804_pcie_aer_ext_cap);
2315
/*
 * VIA CX700: disable PCI bus parking and PCI master read caching, but
 * only when a *second* VT6212L USB controller is present.
 * NOTE(review): presumably the first instance is the internal one and
 * the quirk only matters with an external controller -- confirm against
 * the original changelog.
 */
static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
	/* First match; used only as the starting point for the next search. */
	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
					   PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
	uint8_t b;

	/*
	 * Search on from the first match (pci_get_device() also drops the
	 * reference on it).  Bail out unless a second instance exists.
	 */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
	if (!p)
		return;
	pci_dev_put(p);

	if (pci_read_config_byte(dev, 0x76, &b) == 0) {
		if (b & 0x40) {
			/* Clear bit 6 of register 0x76 (bus parking enable). */
			pci_write_config_byte(dev, 0x76, b ^ 0x40);

			pci_info(dev, "Disabling VIA CX700 PCI parking\n");
		}
	}

	if (pci_read_config_byte(dev, 0x72, &b) == 0) {
		if (b != 0) {
			/* Clear register 0x72 (read caching control). */
			pci_write_config_byte(dev, 0x72, 0x0);

			/* Set register 0x75 to 1 -- presumably a bus
			 * time-out value; confirm against the datasheet. */
			pci_write_config_byte(dev, 0x75, 0x1);

			/* Clear register 0x77 -- presumably a read FIFO
			 * timer; confirm against the datasheet. */
			pci_write_config_byte(dev, 0x77, 0x0);

			pci_info(dev, "Disabling VIA CX700 PCI caching\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2367
2368static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2369{
2370 u32 rev;
2371
2372 pci_read_config_dword(dev, 0xf4, &rev);
2373
2374
2375 if (rev == 0x05719000) {
2376 int readrq = pcie_get_readrq(dev);
2377 if (readrq > 2048)
2378 pcie_set_readrq(dev, 2048);
2379 }
2380}
2381DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2382 PCI_DEVICE_ID_TIGON3_5719,
2383 quirk_brcm_5719_limit_mrrs);
2384
#ifdef CONFIG_PCIE_IPROC_PLATFORM
static void quirk_paxc_bridge(struct pci_dev *pdev)
{
	/*
	 * Force the PCI-to-PCI bridge class on devices with a bridge header
	 * type.  NOTE(review): presumably the PAXC hardware reports a
	 * different class code -- confirm against the iProc documentation.
	 */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;

	/*
	 * Cap the advertised Max Payload Size Supported; pcie_mpss = 2
	 * corresponds to 512 bytes (128 << 2).
	 */
	pdev->pcie_mpss = 2;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
#endif
2410
2411
2412
2413
2414
2415
2416
2417static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2418{
2419 u8 reg;
2420
2421 if (pci_read_config_byte(dev, 0xF4, ®) == 0 && !(reg & 0x02)) {
2422 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2423 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2424 }
2425}
2426DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2427 quirk_unhide_mch_dev6);
2428DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2429 quirk_unhide_mch_dev6);
2430
2431#ifdef CONFIG_PCI_MSI
2432
2433
2434
2435
2436
2437
2438
/*
 * Globally disable MSI for the whole system when one of the chipsets
 * listed below is present (pci_no_msi() affects every device, not just
 * this one).
 */
static void quirk_disable_all_msi(struct pci_dev *dev)
{
	pci_no_msi();
	pci_warn(dev, "MSI quirk detected; MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
2452
2453
2454static void quirk_disable_msi(struct pci_dev *dev)
2455{
2456 if (dev->subordinate) {
2457 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2458 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2459 }
2460}
2461DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2462DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2463DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2464
2465
2466
2467
2468
2469
2470
2471static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
2472{
2473 struct pci_dev *apc_bridge;
2474
2475 apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
2476 if (apc_bridge) {
2477 if (apc_bridge->device == 0x9602)
2478 quirk_disable_msi(apc_bridge);
2479 pci_dev_put(apc_bridge);
2480 }
2481}
2482DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
2483DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2484
2485
2486
2487
2488
2489static int msi_ht_cap_enabled(struct pci_dev *dev)
2490{
2491 int pos, ttl = PCI_FIND_CAP_TTL;
2492
2493 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2494 while (pos && ttl--) {
2495 u8 flags;
2496
2497 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2498 &flags) == 0) {
2499 pci_info(dev, "Found %s HT MSI Mapping\n",
2500 flags & HT_MSI_FLAGS_ENABLE ?
2501 "enabled" : "disabled");
2502 return (flags & HT_MSI_FLAGS_ENABLE) != 0;
2503 }
2504
2505 pos = pci_find_next_ht_capability(dev, pos,
2506 HT_CAPTYPE_MSI_MAPPING);
2507 }
2508 return 0;
2509}
2510
2511
2512static void quirk_msi_ht_cap(struct pci_dev *dev)
2513{
2514 if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
2515 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2516 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2517 }
2518}
2519DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
2520 quirk_msi_ht_cap);
2521
2522
2523
2524
2525
2526static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
2527{
2528 struct pci_dev *pdev;
2529
2530 if (!dev->subordinate)
2531 return;
2532
2533
2534
2535
2536
2537 pdev = pci_get_slot(dev->bus, 0);
2538 if (!pdev)
2539 return;
2540 if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
2541 pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
2542 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2543 }
2544 pci_dev_put(pdev);
2545}
2546DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2547 quirk_nvidia_ck804_msi_ht_cap);
2548
2549
2550static void ht_enable_msi_mapping(struct pci_dev *dev)
2551{
2552 int pos, ttl = PCI_FIND_CAP_TTL;
2553
2554 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2555 while (pos && ttl--) {
2556 u8 flags;
2557
2558 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2559 &flags) == 0) {
2560 pci_info(dev, "Enabling HT MSI Mapping\n");
2561
2562 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2563 flags | HT_MSI_FLAGS_ENABLE);
2564 }
2565 pos = pci_find_next_ht_capability(dev, pos,
2566 HT_CAPTYPE_MSI_MAPPING);
2567 }
2568}
2569DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
2570 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
2571 ht_enable_msi_mapping);
2572DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
2573 ht_enable_msi_mapping);
2574
2575
2576
2577
2578
2579
2580static void nvenet_msi_disable(struct pci_dev *dev)
2581{
2582 const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
2583
2584 if (board_name &&
2585 (strstr(board_name, "P5N32-SLI PREMIUM") ||
2586 strstr(board_name, "P5N32-E SLI"))) {
2587 pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
2588 dev->no_msi = 1;
2589 }
2590}
2591DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2592 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2593 nvenet_msi_disable);
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2606{
2607 u32 cfg;
2608
2609 if (!pci_find_capability(dev, PCI_CAP_ID_HT))
2610 return;
2611
2612 pci_read_config_dword(dev, 0x74, &cfg);
2613
2614 if (cfg & ((1 << 2) | (1 << 15))) {
2615 pr_info("Rewriting IRQ routing register on MCP55\n");
2616 cfg &= ~((1 << 2) | (1 << 15));
2617 pci_write_config_dword(dev, 0x74, cfg);
2618 }
2619}
2620DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2621 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
2622 nvbridge_check_legacy_irq_routing);
2623DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2624 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
2625 nvbridge_check_legacy_irq_routing);
2626
/*
 * Classify @dev's HT MSI-mapping state:
 *   0 - no HT MSI-mapping capability found
 *   1 - at least one mapping capability found, none enabled
 *   2 - an enabled mapping capability found (stops searching immediately)
 */
static int ht_check_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;
	int found = 0;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		/* A capability exists even if its flags are unreadable. */
		if (found < 1)
			found = 1;
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			if (flags & HT_MSI_FLAGS_ENABLE) {
				if (found < 2) {
					found = 2;
					break;
				}
			}
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}

	return found;
}
2654
/*
 * Return 1 if some device in a higher slot than @host_bridge on the same
 * bus has an HT MSI-mapping capability; stop early at the first device
 * that is itself an HT slave.  Scans slots (dev_no+1)..0x1f, function 0.
 */
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
	struct pci_dev *dev;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = host_bridge->devfn >> 3;
	for (i = dev_no + 1; i < 0x20; i++) {
		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
		if (!dev)
			continue;

		/* An HT slave marks the boundary of this bridge's scope. */
		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			pci_dev_put(dev);
			break;
		}

		if (ht_check_msi_mapping(dev)) {
			found = 1;
			pci_dev_put(dev);
			break;
		}
		pci_dev_put(dev);
	}

	return found;
}
2685
/* Offsets of the two link-control words inside the HT slave capability. */
#define PCI_HT_CAP_SLAVE_CTRL0 4
#define PCI_HT_CAP_SLAVE_CTRL1 8

/*
 * Return 1 if @dev sits at the end of its HyperTransport chain.
 * Bit 10 of the capability flags selects which link-control word to
 * consult; bit 6 of that word indicates end-of-chain.
 * NOTE(review): bit meanings follow the HT spec's slave capability
 * layout -- confirm against the specification.
 */
static int is_end_of_ht_chain(struct pci_dev *dev)
{
	int pos, ctrl_off;
	int end = 0;
	u16 flags, ctrl;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);

	if (!pos)
		goto out;

	pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);

	ctrl_off = ((flags >> 10) & 1) ?
			PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
	pci_read_config_word(dev, pos + ctrl_off, &ctrl);

	if (ctrl & (1 << 6))
		end = 1;

out:
	return end;
}
2712
/*
 * Enable HT MSI mapping on @dev, but only after locating the HT slave
 * "host bridge" responsible for it (the nearest lower-numbered slot on
 * the same bus with an HT slave capability, possibly @dev itself).
 */
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
	struct pci_dev *host_bridge;
	int pos;
	int i, dev_no;
	int found = 0;

	/* Walk downward from @dev's own slot looking for the HT slave. */
	dev_no = dev->devfn >> 3;
	for (i = dev_no; i >= 0; i--) {
		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
		if (!host_bridge)
			continue;

		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			/* Keep the reference; released at "out" below. */
			found = 1;
			break;
		}
		pci_dev_put(host_bridge);
	}

	if (!found)
		return;

	/*
	 * If @dev is itself the end of the chain and another device past it
	 * already carries the mapping, do not enable it here.
	 */
	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
	    host_bridge_with_leaf(host_bridge))
		goto out;

	/* Nothing to do if the bridge's mapping is already enabled. */
	if (msi_ht_cap_enabled(host_bridge))
		goto out;

	ht_enable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2751
2752static void ht_disable_msi_mapping(struct pci_dev *dev)
2753{
2754 int pos, ttl = PCI_FIND_CAP_TTL;
2755
2756 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2757 while (pos && ttl--) {
2758 u8 flags;
2759
2760 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2761 &flags) == 0) {
2762 pci_info(dev, "Disabling HT MSI Mapping\n");
2763
2764 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2765 flags & ~HT_MSI_FLAGS_ENABLE);
2766 }
2767 pos = pci_find_next_ht_capability(dev, pos,
2768 HT_CAPTYPE_MSI_MAPPING);
2769 }
2770}
2771
/*
 * Common worker for the nVidia/ALi HT MSI quirks.  If the root at
 * devfn (0,0) is an HT slave, make sure @dev's MSI mapping is enabled
 * (@all selects unconditional vs. chain-aware enabling); otherwise make
 * sure it is disabled.  No-op when MSI is globally off or @dev has no
 * mapping capability at all.
 */
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
	struct pci_dev *host_bridge;
	int pos;
	int found;

	if (!pci_msi_enabled())
		return;

	/* found: 0 = no capability, 1 = present but off, 2 = enabled. */
	found = ht_check_msi_mapping(dev);

	if (found == 0)
		return;

	/*
	 * The device at devfn (0,0) of this domain decides whether HT MSI
	 * mapping should be on for the segment.
	 */
	host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
						  PCI_DEVFN(0, 0));
	if (host_bridge == NULL) {
		pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
		return;
	}

	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
	if (pos != 0) {
		/* Root is an HT slave: mapping should be enabled. */
		if (found == 1) {
			if (all)
				ht_enable_msi_mapping(dev);
			else
				nv_ht_enable_msi_mapping(dev);
		}
		goto out;
	}

	/* Root is not an HT slave: leave a disabled mapping alone... */
	if (found == 1)
		goto out;

	/* ...but turn off an enabled one. */
	ht_disable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2822
/* ALi variant: enable the HT MSI mapping unconditionally (all = 1). */
static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
	__nv_msi_ht_cap_quirk(dev, 1);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2829
/* nVidia variant: chain-aware enabling only (all = 0). */
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
	__nv_msi_ht_cap_quirk(dev, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2836
/*
 * Flag devices that deliver no MSIs while the INTx Disable bit in the
 * command register is set (consumed by the MSI core via dev_flags).
 */
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
2841
2842static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
2843{
2844 struct pci_dev *p;
2845
2846
2847
2848
2849
2850
2851 p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
2852 NULL);
2853 if (!p)
2854 return;
2855
2856 if ((p->revision < 0x3B) && (p->revision >= 0x30))
2857 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2858 pci_dev_put(p);
2859}
2860
2861static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
2862{
2863
2864 if (dev->revision < 0x18) {
2865 pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
2866 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2867 }
2868}
/* Broadcom Tigon3 NICs affected by the MSI/INTx-disable bug. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715S,
			quirk_msi_intx_disable_bug);

/* ATI devices where the bug depends on the SBx00 SMBus revision. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
			quirk_msi_intx_disable_ati_bug);

/* ATI devices affected unconditionally. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
			quirk_msi_intx_disable_bug);

/* Attansic/Atheros NICs; the 0x1090+ IDs depend on chip revision. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
			quirk_msi_intx_disable_qca_bug);
2928#endif
2929
2930
2931
2932
2933
2934
2935
2936
/* Mark the bridge as hotplug-capable so resources are over-allocated. */
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
	dev->is_hotplug_bridge = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
#ifdef CONFIG_MMC_RICOH_MMC
/*
 * Disable the proprietary Ricoh MMC function on RL5C476 combo chips so
 * the card is handled by the standard SDHCI controller instead.  The
 * sequence below unlocks register 0xB7 via the 0x8E/0x8D write-enable
 * pair, sets the disable bit, then restores the unlock registers.
 * NOTE(review): the 0xAA/0xB7 values are undocumented magic from Ricoh --
 * do not reorder these writes.
 */
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
	u8 write_enable;
	u8 write_target;
	u8 disable;

	/* Only function 0 (the CardBus function) carries the control regs. */
	if (PCI_FUNC(dev->devfn))
		return;

	/* Bit 1 of 0xB7 set means the MMC function is already disabled. */
	pci_read_config_byte(dev, 0xB7, &disable);
	if (disable & 0x02)
		return;

	pci_read_config_byte(dev, 0x8E, &write_enable);
	pci_write_config_byte(dev, 0x8E, 0xAA);
	pci_read_config_byte(dev, 0x8D, &write_target);
	pci_write_config_byte(dev, 0x8D, 0xB7);
	pci_write_config_byte(dev, 0xB7, disable | 0x02);
	pci_write_config_byte(dev, 0x8E, write_enable);
	pci_write_config_byte(dev, 0x8D, write_target);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
3000
/*
 * Same idea as ricoh_mmc_fixup_rl5c476 but for R5C832-family chips: turn
 * off the proprietary MMC function (via registers 0xCA/0xCB) so SDHCI
 * handles the card.  On R5CE822/823 the SD base clock is also raised
 * first via an undocumented register sequence.
 */
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
	u8 write_enable;
	u8 disable;

	/* Only function 0 (the FireWire function) carries the control regs. */
	if (PCI_FUNC(dev->devfn))
		return;

	/*
	 * Undocumented unlock/write sequence from Ricoh to switch the base
	 * frequency to 50 MHz on R5CE822/823 -- do not reorder.
	 */
	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
		pci_write_config_byte(dev, 0xf9, 0xfc);
		pci_write_config_byte(dev, 0x150, 0x10);
		pci_write_config_byte(dev, 0xf9, 0x00);
		pci_write_config_byte(dev, 0xfc, 0x01);
		pci_write_config_byte(dev, 0xe1, 0x32);
		pci_write_config_byte(dev, 0xfc, 0x00);

		pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n");
	}

	/* Bit 1 of 0xCB set means the MMC function is already disabled. */
	pci_read_config_byte(dev, 0xCB, &disable);

	if (disable & 0x02)
		return;

	pci_read_config_byte(dev, 0xCA, &write_enable);
	pci_write_config_byte(dev, 0xCA, 0x57);
	pci_write_config_byte(dev, 0xCB, disable | 0x02);
	pci_write_config_byte(dev, 0xCA, write_enable);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
3057#endif
3058
#ifdef CONFIG_DMAR_TABLE
#define VTUNCERRMSK_REG 0x1ac
#define VTD_MSK_SPEC_ERRORS (1 << 31)

/*
 * Set the top bit of the VT-d uncorrectable-error mask register so
 * spec-defined errors are masked on these Intel IOMMU units.
 */
static void vtd_mask_spec_errors(struct pci_dev *dev)
{
	u32 mask;

	pci_read_config_dword(dev, VTUNCERRMSK_REG, &mask);
	pci_write_config_dword(dev, VTUNCERRMSK_REG,
			       mask | VTD_MSK_SPEC_ERRORS);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
#endif
3082
3083static void fixup_ti816x_class(struct pci_dev *dev)
3084{
3085 u32 class = dev->class;
3086
3087
3088 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
3089 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
3090 class, dev->class);
3091}
3092DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
3093 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
3094
3095
3096
3097
3098
/*
 * Cap Max Payload Size Supported; pcie_mpss = 1 corresponds to 256 bytes
 * (128 << 1).
 */
static void fixup_mpss_256(struct pci_dev *dev)
{
	dev->pcie_mpss = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
3109
3110
3111
3112
3113
3114
3115
3116
3117
/*
 * Intel memory-controller hubs with an erratum relating to 256-byte MPS
 * (see the pr_info below): clear bit 10 ("read completion coalescing")
 * of config register 0x48.  Skipped when the bus is left at its default
 * MPS configuration, where the erratum cannot trigger.
 */
static void quirk_intel_mc_errata(struct pci_dev *dev)
{
	int err;
	u16 rcc;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return;

	err = pci_read_config_word(dev, 0x48, &rcc);
	if (err) {
		pci_err(dev, "Error attempting to read the read completion coalescing register\n");
		return;
	}

	/* Already disabled: nothing to do. */
	if (!(rcc & (1 << 10)))
		return;

	rcc &= ~(1 << 10);

	err = pci_write_config_word(dev, 0x48, rcc);
	if (err) {
		pci_err(dev, "Error attempting to write the read completion coalescing register\n");
		return;
	}

	pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
}

/* Intel 5000-series MCH devices. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);

/* Second family of affected Intel MCH devices. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3178
3179
3180
3181
3182
3183
3184static void quirk_intel_ntb(struct pci_dev *dev)
3185{
3186 int rc;
3187 u8 val;
3188
3189 rc = pci_read_config_byte(dev, 0x00D0, &val);
3190 if (rc)
3191 return;
3192
3193 dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
3194
3195 rc = pci_read_config_byte(dev, 0x00D1, &val);
3196 if (rc)
3197 return;
3198
3199 dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
3200}
3201DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
3202DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216#define I915_DEIER_REG 0x4400c
3217static void disable_igfx_irq(struct pci_dev *dev)
3218{
3219 void __iomem *regs = pci_iomap(dev, 0, 0);
3220 if (regs == NULL) {
3221 pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
3222 return;
3223 }
3224
3225
3226 if (readl(regs + I915_DEIER_REG) != 0) {
3227 pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
3228
3229 writel(0, regs + I915_DEIER_REG);
3230 }
3231
3232 pci_iounmap(dev, regs);
3233}
3234DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
3235DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
3236DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
3237DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
3238DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
3239DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
3240DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3241
3242
3243
3244
3245
/*
 * Skip the default delay after a D3->D0 transition for devices known not
 * to need it (Intel LynxPoint-LP and Braswell parts below).
 */
static void quirk_remove_d3_delay(struct pci_dev *dev)
{
	dev->d3_delay = 0;
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
3276
3277
3278
3279
3280
3281
3282static void quirk_broken_intx_masking(struct pci_dev *dev)
3283{
3284 dev->broken_intx_masking = 1;
3285}
3286DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
3287 quirk_broken_intx_masking);
3288DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601,
3289 quirk_broken_intx_masking);
3290DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004,
3291 quirk_broken_intx_masking);
3292
3293
3294
3295
3296
3297
3298
3299DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
3300 quirk_broken_intx_masking);
3301
3302
3303
3304
3305
3306DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
3307DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
3308DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
3309DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
3310DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
3311DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
3312DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
3313DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
3314DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
3315DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
3316DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
3317DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
3318DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
3319DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
3320DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
3321DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
3322
3323static u16 mellanox_broken_intx_devs[] = {
3324 PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
3325 PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
3326 PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
3327 PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
3328 PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
3329 PCI_DEVICE_ID_MELLANOX_HERMON_EN,
3330 PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
3331 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
3332 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
3333 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
3334 PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
3335 PCI_DEVICE_ID_MELLANOX_CONNECTX2,
3336 PCI_DEVICE_ID_MELLANOX_CONNECTX3,
3337 PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
3338};
3339
#define CONNECTX_4_CURR_MAX_MINOR 99
#define CONNECTX_4_INTX_SUPPORT_MINOR 14

/*
 * Flag ConnectX-family devices whose INTx masking is broken.
 *
 * Devices listed in mellanox_broken_intx_devs[] are always broken.
 * ConnectX-4 / ConnectX-4 LX are only broken with firmware older than
 * minor version CONNECTX_4_INTX_SUPPORT_MINOR, so read the firmware
 * version out of the device's initialization segment (start of BAR 0)
 * and flag older firmware.
 */
static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
{
	__be32 __iomem *fw_ver;
	u16 fw_major;
	u16 fw_minor;
	u16 fw_subminor;
	u32 fw_maj_min;
	u32 fw_sub_min;
	int i;

	/* Devices in the table are broken regardless of firmware version */
	for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
		if (pdev->device == mellanox_broken_intx_devs[i]) {
			pdev->broken_intx_masking = 1;
			return;
		}
	}

	/* ConnectIB needs no firmware probe; leave it unflagged */
	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
		return;

	/* Only ConnectX-4 / ConnectX-4 LX need the firmware check below */
	if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
	    pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
		return;

	/* Enable memory decoding so BAR 0 can be read */
	if (pci_enable_device_mem(pdev)) {
		pci_warn(pdev, "Can't enable device memory\n");
		return;
	}

	/*
	 * NOTE(review): only 4 bytes are mapped here but 8 bytes are read
	 * below (fw_ver and fw_ver + 1).  This works because ioremap() is
	 * page-granular, but mapping 8 bytes would be strictly correct.
	 */
	fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
	if (!fw_ver) {
		pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
		goto out;
	}

	/* Firmware version: big-endian major/minor dword, then subminor dword */
	fw_maj_min = ioread32be(fw_ver);
	fw_sub_min = ioread32be(fw_ver + 1);
	fw_major = fw_maj_min & 0xffff;
	fw_minor = fw_maj_min >> 16;
	fw_subminor = fw_sub_min & 0xffff;
	if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
	    fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
		pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
			 fw_major, fw_minor, fw_subminor, pdev->device ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
		pdev->broken_intx_masking = 1;
	}

	iounmap(fw_ver);

out:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
			mellanox_check_broken_intx_masking);
3410
3411static void quirk_no_bus_reset(struct pci_dev *dev)
3412{
3413 dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
3414}
3415
3416
3417
3418
3419
3420
3421
3422
3423DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
3424DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
3425DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
3426DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
3427DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
3428
3429
3430
3431
3432
3433
3434DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
3435
3436static void quirk_no_pm_reset(struct pci_dev *dev)
3437{
3438
3439
3440
3441
3442 if (!pci_is_root_bus(dev->bus))
3443 dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
3444}
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
3455 PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3456
3457
3458
3459
3460
3461
3462static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
3463{
3464 if (pdev->is_hotplug_bridge &&
3465 (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
3466 pdev->revision <= 1))
3467 pdev->no_msi = 1;
3468}
3469DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
3470 quirk_thunderbolt_hotplug_msi);
3471DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
3472 quirk_thunderbolt_hotplug_msi);
3473DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
3474 quirk_thunderbolt_hotplug_msi);
3475DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
3476 quirk_thunderbolt_hotplug_msi);
3477DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
3478 quirk_thunderbolt_hotplug_msi);
3479
3480#ifdef CONFIG_ACPI
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
/*
 * Apple: cut power to the Cactus Ridge Thunderbolt controller at
 * suspend by invoking vendor ACPI methods found under the upstream
 * bridge's scope.  Runs as a SUSPEND_LATE fixup on Apple machines only.
 */
static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
	acpi_handle bridge, SXIO, SXFP, SXLV;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
		return;
	bridge = ACPI_HANDLE(&dev->dev);
	if (!bridge)
		return;

	/*
	 * The SXIO/SXFP/SXLV control methods only exist on machines wired
	 * for ACPI power control of the controller; bail if any is absent.
	 * NOTE(review): method semantics inferred from Apple firmware
	 * behavior -- confirm against an ACPI table dump.
	 */
	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
		return;
	pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");

	/*
	 * Power-down sequence; the ordering and the 300 ms delay are part
	 * of the firmware-prescribed sequence -- do not reorder.
	 */
	acpi_execute_simple_method(SXIO, NULL, 1);
	acpi_execute_simple_method(SXFP, NULL, 0);
	msleep(300);
	acpi_execute_simple_method(SXLV, NULL, 0);
	acpi_execute_simple_method(SXIO, NULL, 0);
	acpi_execute_simple_method(SXLV, NULL, 0);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_poweroff_thunderbolt);
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
/*
 * Apple: on resume, make this Thunderbolt downstream port wait for the
 * NHI device to finish resuming before it is touched, so the NHI has a
 * chance to reestablish the PCI tunnels behind the port.
 */
static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
{
	struct pci_dev *sibling = NULL;
	struct pci_dev *nhi = NULL;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
		return;

	/*
	 * Locate the NHI: it sits at devfn 0 on the subordinate bus of
	 * the sibling port at devfn 0 of this port's own bus.
	 */
	sibling = pci_get_slot(dev->bus, 0x0);
	if (sibling == dev)
		goto out;		/* dev itself occupies slot 0 */
	if (!sibling || !sibling->subordinate)
		goto out;
	nhi = pci_get_slot(sibling->subordinate, 0x0);
	if (!nhi)
		goto out;
	/* Only known Thunderbolt NHI device/class combinations qualify */
	if (nhi->vendor != PCI_VENDOR_ID_INTEL
	    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
		nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
	    || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
		goto out;
	pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
	/* Drop the references taken by pci_get_slot(); both are NULL-safe */
	pci_dev_put(nhi);
	pci_dev_put(sibling);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
3589#endif
3590
3591
3592
3593
3594
3595
/*
 * Device-specific reset for Intel 82599 SFP virtual functions: a plain
 * FLR is sufficient.  A probe call only reports that a reset method is
 * available; it performs no reset.
 */
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
	if (probe)
		return 0;

	pcie_flr(dev);
	return 0;
}
3610
/* Register offsets within the Ivy Bridge IGD MMIO BAR */
#define SOUTH_CHICKEN2 0xc2004
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define MSG_CTL 0x45010
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000	/* ms: poll limit for panel power-off */

/*
 * Device-specific reset for Ivy Bridge integrated graphics: turn the
 * panel power off through the PCH power-sequencer registers, polling
 * the status register for completion, then report the power state.
 * NOTE(review): register magic values come from hardware bring-up;
 * confirm against the IGD programming reference.
 */
static int reset_ivb_igd(struct pci_dev *dev, int probe)
{
	void __iomem *mmio_base;
	unsigned long timeout;
	u32 val;

	/* Probe call: only report that this reset method exists */
	if (probe)
		return 0;

	mmio_base = pci_iomap(dev, 0, 0);
	if (!mmio_base)
		return -ENOMEM;

	iowrite32(0x00000002, mmio_base + MSG_CTL);

	/* Unlock/enable the panel power sequencer via SOUTH_CHICKEN2 */
	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);

	/* Clear the power-on bit (bit 0) of panel power control */
	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
	iowrite32(val, mmio_base + PCH_PP_CONTROL);

	/* Poll for the busy/on status bits to clear, up to 10 s */
	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
	do {
		val = ioread32(mmio_base + PCH_PP_STATUS);
		if ((val & 0xb0000000) == 0)
			goto reset_complete;
		msleep(10);
	} while (time_before(jiffies, timeout));
	pci_warn(dev, "timeout during reset\n");

reset_complete:
	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);

	pci_iounmap(dev, mmio_base);
	return 0;
}
3659
3660
/* Device-specific reset method for Chelsio T4-based adapters */
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
	u16 old_command;
	u16 msix_flags;

	/* Only T4-based parts (device IDs 0x4xxx) take this reset path */
	if ((dev->device & 0xf000) != 0x4000)
		return -ENOTTY;

	/* Probe call: only report that this reset method exists */
	if (probe)
		return 0;

	/*
	 * Enable Bus Mastering before the FLR.
	 * NOTE(review): presumably this lets the chip drain in-flight DMA
	 * so the FLR doesn't wedge it -- only the enable itself is
	 * visible here; confirm against Chelsio errata.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &old_command);
	pci_write_config_word(dev, PCI_COMMAND,
			      old_command | PCI_COMMAND_MASTER);

	/* Save config space so it can be restored after the FLR wipes it */
	pci_save_state(dev);

	/*
	 * If MSI-X is not already enabled, enable it with all vectors
	 * masked so the device cannot raise stale interrupts across the
	 * reset.
	 */
	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
				      msix_flags |
				      PCI_MSIX_FLAGS_ENABLE |
				      PCI_MSIX_FLAGS_MASKALL);

	pcie_flr(dev);

	/*
	 * Restore the saved state (also undoing the temporary MSI-X
	 * enable/mask) and put the command register back as found.
	 */
	pci_restore_state(dev);
	pci_write_config_word(dev, PCI_COMMAND, old_command);
	return 0;
}
3721
/* Device IDs matched by the pci_dev_reset_methods[] table below */
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
/*
 * Reset an NVMe controller by first disabling it (CC.EN = 0) and
 * waiting for CSTS.RDY to clear, then issuing an FLR.  Used for
 * devices that misbehave when FLR is issued while the controller is
 * still enabled.
 */
static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
{
	void __iomem *bar;
	u16 cmd;
	u32 cfg;

	/* Needs an NVMe-class device with FLR support and a usable BAR 0 */
	if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
	    !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
		return -ENOTTY;

	/* Probe call: only report that this reset method exists */
	if (probe)
		return 0;

	/* Map just enough of BAR 0 to reach the CC register */
	bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
	if (!bar)
		return -ENOTTY;

	/* Memory decoding must be on to access the controller registers */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);

	cfg = readl(bar + NVME_REG_CC);

	/* If the controller is enabled, disable it and wait for not-ready */
	if (cfg & NVME_CC_ENABLE) {
		u32 cap = readl(bar + NVME_REG_CAP);
		unsigned long timeout;

		/* Clear any shutdown notification along with the enable bit */
		cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);

		writel(cfg, bar + NVME_REG_CC);

		/*
		 * Wait (CAP.TO + 1) * 500 ms for CSTS.RDY to clear.
		 * NOTE(review): only the low 32 bits of the 64-bit CAP
		 * register are read; sufficient if the timeout field sits
		 * in bits 31:24 as in the NVMe spec -- worth confirming.
		 */
		timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

		for (;;) {
			u32 status = readl(bar + NVME_REG_CSTS);

			/* RDY cleared: controller disable is complete */
			if (!(status & NVME_CSTS_RDY))
				break;

			msleep(100);

			if (time_after(jiffies, timeout)) {
				pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
				break;
			}
		}
	}

	pci_iounmap(dev, bar);

	pcie_flr(dev);

	return 0;
}
3806
3807
3808
3809
3810
3811
3812
3813static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
3814{
3815 if (!pcie_has_flr(dev))
3816 return -ENOTTY;
3817
3818 if (probe)
3819 return 0;
3820
3821 pcie_flr(dev);
3822
3823 msleep(250);
3824
3825 return 0;
3826}
3827
/*
 * Device-specific reset methods, matched by vendor/device in
 * pci_dev_specific_reset().  First match wins; zero-terminated.
 */
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
		 reset_intel_82599_sfp_virtfn },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
		reset_chelsio_generic_dev },
	{ 0 }
};
3841
3842
3843
3844
3845
3846
3847int pci_dev_specific_reset(struct pci_dev *dev, int probe)
3848{
3849 const struct pci_dev_reset_methods *i;
3850
3851 for (i = pci_dev_reset_methods; i->reset; i++) {
3852 if ((i->vendor == dev->vendor ||
3853 i->vendor == (u16)PCI_ANY_ID) &&
3854 (i->device == dev->device ||
3855 i->device == (u16)PCI_ANY_ID))
3856 return i->reset(dev, probe);
3857 }
3858
3859 return -ENOTTY;
3860}
3861
3862static void quirk_dma_func0_alias(struct pci_dev *dev)
3863{
3864 if (PCI_FUNC(dev->devfn) != 0)
3865 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
3866}
3867
3868
3869
3870
3871
3872
3873DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
3874DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
3875
3876static void quirk_dma_func1_alias(struct pci_dev *dev)
3877{
3878 if (PCI_FUNC(dev->devfn) != 1)
3879 pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
3880}
3881
3882
3883
3884
3885
3886
3887
3888DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
3889 quirk_dma_func1_alias);
3890DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
3891 quirk_dma_func1_alias);
3892DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
3893 quirk_dma_func1_alias);
3894
3895DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
3896 quirk_dma_func1_alias);
3897DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
3898 quirk_dma_func1_alias);
3899
3900DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
3901 quirk_dma_func1_alias);
3902
3903DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
3904 quirk_dma_func1_alias);
3905
3906DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
3907 quirk_dma_func1_alias);
3908
3909DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
3910 quirk_dma_func1_alias);
3911
3912DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
3913 quirk_dma_func1_alias);
3914
3915DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
3916 quirk_dma_func1_alias);
3917
3918DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
3919 quirk_dma_func1_alias);
3920DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
3921 quirk_dma_func1_alias);
3922DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
3923 quirk_dma_func1_alias);
3924
3925DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
3926 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
3927 quirk_dma_func1_alias);
3928
3929DECLARE_PCI_FIXUP_HEADER(0x1c28,
3930 0x0122,
3931 quirk_dma_func1_alias);
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
/*
 * Adaptec 0x0285 controllers with these subsystem IDs issue DMA with a
 * fixed alternate requester ID (stored in driver_data as a devfn);
 * consumed by quirk_fixed_dma_alias() below.
 * NOTE(review): alias devfn values come from the original quirk
 * reports -- confirm against adapter documentation.
 */
static const struct pci_device_id fixed_dma_alias_tbl[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bb),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bc),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ 0 }
};
3957
3958static void quirk_fixed_dma_alias(struct pci_dev *dev)
3959{
3960 const struct pci_device_id *id;
3961
3962 id = pci_match_id(fixed_dma_alias_tbl, dev);
3963 if (id)
3964 pci_add_dma_alias(dev, id->driver_data);
3965}
3966
3967DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
3979{
3980 if (!pci_is_root_bus(pdev->bus) &&
3981 pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3982 !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
3983 pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
3984 pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
3985}
3986
3987DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
3988 quirk_use_pcie_bridge_dma_alias);
3989
3990DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
3991
3992DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
3993
3994DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);
3995
3996DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
3997
3998
3999
4000
4001
4002
4003
4004static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
4005{
4006 pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
4007 pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
4008 pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
4009}
4010DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
4011DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
4012
4013
4014
4015
4016
4017
4018static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
4019{
4020 pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
4021}
4022DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
4023 quirk_bridge_cavm_thrx2_pcie_root);
4024DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
4025 quirk_bridge_cavm_thrx2_pcie_root);
4026
4027
4028
4029
4030
4031static void quirk_tw686x_class(struct pci_dev *pdev)
4032{
4033 u32 class = pdev->class;
4034
4035
4036 pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
4037 pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
4038 class, pdev->class);
4039}
4040DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
4041 quirk_tw686x_class);
4042DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
4043 quirk_tw686x_class);
4044DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
4045 quirk_tw686x_class);
4046DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
4047 quirk_tw686x_class);
4048
4049
4050
4051
4052
4053
/*
 * Mark devices whose Relaxed Ordering Attribute must not be used, to
 * avoid a PCIe Completion erratum in the affected root complexes.
 */
static void quirk_relaxedordering_disable(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
	pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
}

/*
 * Intel root ports, device IDs 0x6f01-0x6f0e and 0x2f01-0x2f0e.
 * NOTE(review): presumably the Xeon E5/E7 v3/v4 root-complex flow
 * control erratum -- confirm against the Intel specification updates.
 */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);

/*
 * AMD root ports 0x1a00-0x1a02.
 * NOTE(review): presumably the same class of root-complex completion
 * erratum -- confirm against AMD errata documentation.
 */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
4162{
4163 struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
4164
4165 if (!root_port) {
4166 pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
4167 return;
4168 }
4169
4170 pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
4171 dev_name(&pdev->dev));
4172 pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
4173 PCI_EXP_DEVCTL_RELAX_EN |
4174 PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
4175}
4176
4177
4178
4179
4180
4181static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
4182{
4183
4184
4185
4186
4187
4188
4189 if ((pdev->device & 0xff00) == 0x5400)
4190 quirk_disable_root_port_attributes(pdev);
4191}
4192DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
4193 quirk_chelsio_T5_disable_root_port_attributes);
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
/*
 * ACS quirk for AMD southbridge multifunction devices: when an AMD
 * IOMMU is present (indicated by an ACPI IVRS table), treat Request
 * Redirect and Completion Redirect as provided even though no ACS
 * capability is exposed.
 * NOTE(review): the isolation claim is inferred from the IVRS check --
 * confirm against AMD IOMMU/erratum documentation.
 */
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
#ifdef CONFIG_ACPI
	struct acpi_table_header *header = NULL;
	acpi_status status;

	/* Quirk only applies to multifunction devices on the root bus */
	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
		return -ENODEV;

	/* An IVRS table indicates an AMD IOMMU is present */
	status = acpi_get_table("IVRS", 0, &header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* Keep only flags applicable to a multifunction device */
	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);

	/* RR and CR are effectively provided; EC and DT are not */
	return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
#else
	return -ENODEV;
#endif
}
4243
4244static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
4245{
4246
4247
4248
4249
4250
4251
4252 return (pci_is_pcie(dev) &&
4253 (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
4254 ((dev->device & 0xf800) == 0xa000));
4255}
4256
4257static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
4258{
4259
4260
4261
4262
4263
4264
4265
4266
4267 acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
4268
4269 if (!pci_quirk_cavium_acs_match(dev))
4270 return -ENOTTY;
4271
4272 return acs_flags ? 0 : 1;
4273}
4274
4275static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
4276{
4277
4278
4279
4280
4281
4282 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4283
4284 return acs_flags ? 0 : 1;
4285}
4286
4287
4288
4289
4290
4291
4292
/*
 * Intel PCH root-port device IDs covered by the PCH ACS quirk, grouped
 * by chipset generation.  NOTE(review): generation labels inferred from
 * Intel chipset datasheets -- confirm the device-ID ranges there.
 */
static const u16 pci_quirk_intel_pch_acs_ids[] = {
	/* Ibex Peak PCH */
	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
	/* Cougar Point PCH */
	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
	/* Panther Point PCH */
	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
	/* Lynx Point PCH */
	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
	/* Lynx Point-LP PCH */
	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
	/* Wildcat Point-LP PCH */
	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
	/* Patsburg (X79) PCH */
	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
	/* Wellsburg (X99) PCH */
	0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
	0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
	/* Lynx Point (9 series) PCH */
	0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
4320
4321static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4322{
4323 int i;
4324
4325
4326 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4327 return false;
4328
4329 for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
4330 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4331 return true;
4332
4333 return false;
4334}
4335
4336#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
4337
4338static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
4339{
4340 u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
4341 INTEL_PCH_ACS_FLAGS : 0;
4342
4343 if (!pci_quirk_intel_pch_acs_match(dev))
4344 return -ENOTTY;
4345
4346 return acs_flags & ~flags ? 0 : 1;
4347}
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4360{
4361 u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
4362 int ret = acs_flags & ~flags ? 0 : 1;
4363
4364 pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
4365
4366 return ret;
4367}
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4415{
4416 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4417 return false;
4418
4419 switch (dev->device) {
4420 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a:
4421 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee:
4422 case 0x9d10 ... 0x9d1b:
4423 return true;
4424 }
4425
4426 return false;
4427}
4428
/* These root ports place the ACS control register at a non-standard
 * DWORD offset past the capabilities register, instead of PCI_ACS_CTRL. */
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)

/*
 * ACS quirk for Sunrise Point-era PCH root ports: ACS is implemented,
 * but the control register must be read from INTEL_SPT_ACS_CTRL.
 */
static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return -ENOTTY;

	/* Consider only flags the hardware advertises (EC always allowed) */
	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	/* Enabled only if every requested flag is currently switched on */
	return acs_flags & ~ctrl ? 0 : 1;
}
4451
4452static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
4453{
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
4464 PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
4465
4466 return acs_flags ? 0 : 1;
4467}
4468
/*
 * Table of device-specific ACS handlers, matched by vendor/device ID
 * (PCI_ANY_ID wildcards allowed).  Terminated by a zeroed entry.
 */
static const struct pci_dev_acs_enabled {
	u16 vendor;
	u16 device;
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
	/* 82580 */
	{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
	/* 82576 */
	{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
	/* 82575 */
	{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
	/* I350 */
	{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
	/* 82571 (Quads only) */
	{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
	/* I219 */
	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
	/* QCOM QDF2xxx root ports */
	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
	/* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */
	{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
	/* Intel PCH root ports */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
	/* Cavium ThunderX */
	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
	/* APM X-Gene */
	{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
	/* Ampere Computing */
	{ PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
	{ 0 }
};
4564
4565int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
4566{
4567 const struct pci_dev_acs_enabled *i;
4568 int ret;
4569
4570
4571
4572
4573
4574
4575
4576 for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
4577 if ((i->vendor == dev->vendor ||
4578 i->vendor == (u16)PCI_ANY_ID) &&
4579 (i->device == dev->device ||
4580 i->device == (u16)PCI_ANY_ID)) {
4581 ret = i->acs_enabled(dev, acs_flags);
4582 if (ret >= 0)
4583 return ret;
4584 }
4585 }
4586
4587 return -ENOTTY;
4588}
4589
4590
4591#define INTEL_LPC_RCBA_REG 0xf0
4592
4593#define INTEL_LPC_RCBA_MASK 0xffffc000
4594
4595#define INTEL_LPC_RCBA_ENABLE (1 << 0)
4596
4597
4598#define INTEL_BSPR_REG 0x1104
4599
4600#define INTEL_BSPR_REG_BPNPD (1 << 8)
4601
4602#define INTEL_BSPR_REG_BPPD (1 << 9)
4603
4604
4605#define INTEL_UPDCR_REG 0x1114
4606
4607#define INTEL_UPDCR_REG_MASK 0x3f
4608
4609static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
4610{
4611 u32 rcba, bspr, updcr;
4612 void __iomem *rcba_mem;
4613
4614
4615
4616
4617
4618
4619 pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
4620 INTEL_LPC_RCBA_REG, &rcba);
4621 if (!(rcba & INTEL_LPC_RCBA_ENABLE))
4622 return -EINVAL;
4623
4624 rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
4625 PAGE_ALIGN(INTEL_UPDCR_REG));
4626 if (!rcba_mem)
4627 return -ENOMEM;
4628
4629
4630
4631
4632
4633
4634
4635
4636 bspr = readl(rcba_mem + INTEL_BSPR_REG);
4637 bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
4638 if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
4639 updcr = readl(rcba_mem + INTEL_UPDCR_REG);
4640 if (updcr & INTEL_UPDCR_REG_MASK) {
4641 pci_info(dev, "Disabling UPDCR peer decodes\n");
4642 updcr &= ~INTEL_UPDCR_REG_MASK;
4643 writel(updcr, rcba_mem + INTEL_UPDCR_REG);
4644 }
4645 }
4646
4647 iounmap(rcba_mem);
4648 return 0;
4649}
4650
4651
4652#define INTEL_MPC_REG 0xd8
4653
4654#define INTEL_MPC_REG_IRBNCE (1 << 26)
4655
4656static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
4657{
4658 u32 mpc;
4659
4660
4661
4662
4663
4664
4665
4666 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
4667 if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
4668 pci_info(dev, "Enabling MPC IRBNCE\n");
4669 mpc |= INTEL_MPC_REG_IRBNCE;
4670 pci_write_config_word(dev, INTEL_MPC_REG, mpc);
4671 }
4672}
4673
4674static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
4675{
4676 if (!pci_quirk_intel_pch_acs_match(dev))
4677 return -ENOTTY;
4678
4679 if (pci_quirk_enable_intel_lpc_acs(dev)) {
4680 pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
4681 return 0;
4682 }
4683
4684 pci_quirk_enable_intel_rp_mpc_acs(dev);
4685
4686 dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
4687
4688 pci_info(dev, "Intel PCH root port ACS workaround enabled\n");
4689
4690 return 0;
4691}
4692
4693static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
4694{
4695 int pos;
4696 u32 cap, ctrl;
4697
4698 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4699 return -ENOTTY;
4700
4701 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
4702 if (!pos)
4703 return -ENOTTY;
4704
4705 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4706 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4707
4708 ctrl |= (cap & PCI_ACS_SV);
4709 ctrl |= (cap & PCI_ACS_RR);
4710 ctrl |= (cap & PCI_ACS_CR);
4711 ctrl |= (cap & PCI_ACS_UF);
4712
4713 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4714
4715 pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
4716
4717 return 0;
4718}
4719
4720static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
4721{
4722 int pos;
4723 u32 cap, ctrl;
4724
4725 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4726 return -ENOTTY;
4727
4728 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
4729 if (!pos)
4730 return -ENOTTY;
4731
4732 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4733 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4734
4735 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
4736
4737 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4738
4739 pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
4740
4741 return 0;
4742}
4743
/*
 * Device-specific ACS enable/disable-redirect operations, matched by
 * vendor/device ID (PCI_ANY_ID wildcards allowed).  Unlike
 * pci_dev_acs_enabled[], this array is not sentinel-terminated; callers
 * iterate with ARRAY_SIZE().  Either callback may be absent.
 */
static const struct pci_dev_acs_ops {
	u16 vendor;
	u16 device;
	int (*enable_acs)(struct pci_dev *dev);
	int (*disable_acs_redir)(struct pci_dev *dev);
} pci_dev_acs_ops[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_pch_acs,
	},
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
	    .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
	},
};
4758
4759int pci_dev_specific_enable_acs(struct pci_dev *dev)
4760{
4761 const struct pci_dev_acs_ops *p;
4762 int i, ret;
4763
4764 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
4765 p = &pci_dev_acs_ops[i];
4766 if ((p->vendor == dev->vendor ||
4767 p->vendor == (u16)PCI_ANY_ID) &&
4768 (p->device == dev->device ||
4769 p->device == (u16)PCI_ANY_ID) &&
4770 p->enable_acs) {
4771 ret = p->enable_acs(dev);
4772 if (ret >= 0)
4773 return ret;
4774 }
4775 }
4776
4777 return -ENOTTY;
4778}
4779
4780int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
4781{
4782 const struct pci_dev_acs_ops *p;
4783 int i, ret;
4784
4785 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
4786 p = &pci_dev_acs_ops[i];
4787 if ((p->vendor == dev->vendor ||
4788 p->vendor == (u16)PCI_ANY_ID) &&
4789 (p->device == dev->device ||
4790 p->device == (u16)PCI_ANY_ID) &&
4791 p->disable_acs_redir) {
4792 ret = p->disable_acs_redir(dev);
4793 if (ret >= 0)
4794 return ret;
4795 }
4796 }
4797
4798 return -ENOTTY;
4799}
4800
4801
4802
4803
4804
4805
4806
4807
/*
 * Intel QAT VFs keep their PCIe capability out of the standard capability
 * list chain (the MSI capability's "next" pointer is 0), so the core never
 * discovers it.  This quirk probes the fixed offset 0x50; if a PCIe
 * capability header is found there it fills in pdev's PCIe bookkeeping
 * (pcie_cap, flags, MPSS, cfg_size) and pre-allocates a saved-state buffer
 * for the PCIe registers so suspend/resume can save/restore them.
 */
static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
{
	int pos, i = 0;
	u8 next_cap;
	u16 reg16, *cap;
	struct pci_cap_saved_state *state;

	/* Nothing to do if the PCIe capability was already discovered */
	if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
		return;

	/* The quirk only applies when MSI is the last (only) capability */
	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	/*
	 * The "next capability" pointer of the MSI capability must be 0,
	 * i.e. the list terminates without reaching the PCIe capability.
	 */
	pci_read_config_byte(pdev, pos + 1, &next_cap);
	if (next_cap)
		return;

	/*
	 * Probe the known fixed offset of the hidden PCIe capability.  The
	 * word there must read as capability ID PCI_CAP_ID_EXP with a zero
	 * next pointer (hence the 0x0000 in the comparison).
	 */
	pos = 0x50;
	pci_read_config_word(pdev, pos, &reg16);
	if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
		u32 status;
#ifndef PCI_EXP_SAVE_REGS
#define PCI_EXP_SAVE_REGS 7
#endif
		int size = PCI_EXP_SAVE_REGS * sizeof(u16);

		/* Populate the PCIe fields the core would normally fill */
		pdev->pcie_cap = pos;
		pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
		pdev->pcie_flags_reg = reg16;
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
		pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

		/* Assume extended config space unless reading it fails */
		pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
		if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
		    PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
			pdev->cfg_size = PCI_CFG_SPACE_SIZE;

		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
			return;

		/* Pre-allocate the save buffer the PM path will reuse */
		state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
		if (!state)
			return;

		state->cap.cap_nr = PCI_CAP_ID_EXP;
		state->cap.cap_extended = 0;
		state->cap.size = size;
		cap = (u16 *)&state->cap.data[0];
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
		hlist_add_head(&state->next, &pdev->saved_cap_space);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
4883
4884
/*
 * Mark the device as not supporting Function Level Reset so the core
 * avoids FLR on it.  Applied to Intel 0x1502/0x1503 below; presumably
 * these devices misbehave after FLR — TODO confirm against errata.
 */
static void quirk_intel_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
4891
4892static void quirk_no_ext_tags(struct pci_dev *pdev)
4893{
4894 struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
4895
4896 if (!bridge)
4897 return;
4898
4899 bridge->no_ext_tags = 1;
4900 pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");
4901
4902 pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
4903}
4904DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
4905DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
4906DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
4907DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
4908DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
4909DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
4910DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
4911
#ifdef CONFIG_PCI_ATS
/*
 * Clear the device's ATS capability offset so the core never enables
 * Address Translation Services on it.
 */
static void quirk_no_ats(struct pci_dev *pdev)
{
	pci_info(pdev, "disabling ATS (broken on this device)\n");
	pdev->ats_cap = 0;
}

/* AMD/ATI GPUs with reportedly broken ATS */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
#endif
4927
4928
/* Freescale PCIe root ports: disable MSI on the port itself */
static void quirk_fsl_no_msi(struct pci_dev *pdev)
{
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
		pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
4935
4936
4937
4938
4939
4940
4941
4942
4943
/*
 * Create a runtime-PM device link from a consumer function to a supplier
 * function in the same slot (e.g. a GPU's HDA/USB companion function to
 * the GPU itself), so the consumer's D0 state depends on the supplier.
 *
 * @pdev:        candidate consumer device
 * @consumer:    function number @pdev must have for the link to apply
 * @supplier:    function number of the supplier in the same slot
 * @class:       required class of the supplier, after shifting
 * @class_shift: right-shift applied to the supplier's class before compare
 *
 * Also enables runtime PM on the consumer.  All pci_get/put refcounting
 * is balanced within this function.
 */
static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
				   unsigned int supplier, unsigned int class,
				   unsigned int class_shift)
{
	struct pci_dev *supplier_pdev;

	if (PCI_FUNC(pdev->devfn) != consumer)
		return;

	supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
	if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
		/* pci_dev_put(NULL) is a no-op, so this is safe either way */
		pci_dev_put(supplier_pdev);
		return;
	}

	/* device_link_add() returns the link on success, NULL on failure */
	if (device_link_add(&pdev->dev, &supplier_pdev->dev,
			    DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
		pci_info(pdev, "D0 power state depends on %s\n",
			 pci_name(supplier_pdev));
	else
		pci_err(pdev, "Cannot enforce power dependency on %s\n",
			pci_name(supplier_pdev));

	pm_runtime_allow(&pdev->dev);
	pci_dev_put(supplier_pdev);
}
4972
4973
4974
4975
4976
/*
 * Make a GPU's HD Audio companion (function 1) power-depend on the GPU
 * itself (function 0, display class).
 */
static void quirk_gpu_hda(struct pci_dev *hda)
{
	pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
4987
4988
4989
4990
4991
/*
 * Make a GPU's USB controller companion (function 2) power-depend on the
 * GPU itself (function 0, display class).
 */
static void quirk_gpu_usb(struct pci_dev *usb)
{
	pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
4998
4999
5000
5001
5002
5003
5004
/* The UCSI controller is exposed with an "unknown serial bus" class code */
#define PCI_CLASS_SERIAL_UNKNOWN	0x0c80
/*
 * Make a GPU's USB Type-C UCSI controller (function 3) power-depend on
 * the GPU itself (function 0, display class).
 */
static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
{
	pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_UNKNOWN, 8,
			      quirk_gpu_usb_typec_ucsi);
5013
5014
5015
5016
5017
/*
 * Some NVIDIA GPUs hide their HDA controller function.  Set bit 25 of the
 * vendor-specific config register at 0x488 to expose it, then refresh the
 * multifunction flag from the header type so the hidden function is
 * enumerated.  Runs at header fixup and on resume.
 */
static void quirk_nvidia_hda(struct pci_dev *gpu)
{
	u8 hdr_type;
	u32 val;

	/* Only applies to newer GPUs (GeForce 320M and later device IDs) */
	if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
		return;

	/* Bit 25 at config offset 0x488 gates the HDA function */
	pci_read_config_dword(gpu, 0x488, &val);
	if (val & BIT(25))
		return;

	pci_info(gpu, "Enabling HDA controller\n");
	pci_write_config_dword(gpu, 0x488, val | BIT(25));

	/* The GPU may now report multifunction; update our cached view */
	pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
	gpu->multifunction = !!(hdr_type & 0x80);
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			       PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
				     PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
/*
 * Read the Vendor/Device ID of bus:devfn through an IDT switch downstream
 * port with ACS Source Validation temporarily disabled on the bridge, to
 * work around the switch flagging SV errors on these config accesses.
 * After a successful read, a (harmless) write to the read-only Vendor ID
 * register is issued, then the original ACS control value is restored.
 * Returns the result of pci_bus_generic_read_dev_vendor_id().
 */
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
{
	int pos;
	u16 ctrl = 0;
	bool found;
	struct pci_dev *bridge = bus->self;

	pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS);

	/* Temporarily clear Source Validation on the bridge, if set */
	if (pos) {
		pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
		if (ctrl & PCI_ACS_SV)
			pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
					      ctrl & ~PCI_ACS_SV);
	}

	found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);

	/* Config write to the (read-only) Vendor ID register */
	if (found)
		pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);

	/* Restore Source Validation exactly as we found it */
	if (ctrl & PCI_ACS_SV)
		pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);

	return found;
}
5098
5099
5100
5101
5102
5103
5104
5105
/*
 * Microsemi Switchtec NTB: requests from peer partitions arrive with
 * proxy requester IDs.  Walk the NTB control registers of every peer
 * partition and register each proxy ID as a DMA alias of this device so
 * the IOMMU maps them correctly.
 */
static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
{
	void __iomem *mmio;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	u64 partition_map;
	u8 partition;
	int pp;

	if (pci_enable_device(pdev)) {
		pci_err(pdev, "Cannot enable Switchtec device\n");
		return;
	}

	/* Map BAR 0 (whole GAS region) */
	mmio = pci_iomap(pdev, 0, 0);
	if (mmio == NULL) {
		pci_disable_device(pdev);
		pci_err(pdev, "Cannot iomap Switchtec device\n");
		return;
	}

	pci_info(pdev, "Setting Switchtec proxy ID aliases\n");

	mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
	mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;

	partition = ioread8(&mmio_ntb->partition_id);

	/*
	 * Build the 64-bit map of peer partitions, excluding our own.
	 * NOTE(review): "&mmio_ntb->ep_map + 4" is pointer arithmetic on
	 * the ep_map member's type, not a +4 byte offset — verify it really
	 * addresses the upper 32 bits of ep_map.
	 */
	partition_map = ioread32(&mmio_ntb->ep_map);
	partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
	partition_map &= ~(1ULL << partition);

	for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
		struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
		u32 table_sz = 0;
		int te;

		if (!(partition_map & (1ULL << pp)))
			continue;

		pci_dbg(pdev, "Processing partition %d\n", pp);

		mmio_peer_ctrl = &mmio_ctrl[pp];

		table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
		if (!table_sz) {
			pci_warn(pdev, "Partition %d table_sz 0\n", pp);
			continue;
		}

		/* Sanity-bound the requester ID table size */
		if (table_sz > 512) {
			pci_warn(pdev,
				 "Invalid Switchtec partition %d table_sz %d\n",
				 pp, table_sz);
			continue;
		}

		/* Alias every proxy devfn listed in the peer's ID table */
		for (te = 0; te < table_sz; te++) {
			u32 rid_entry;
			u8 devfn;

			rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
			devfn = (rid_entry >> 1) & 0xFF;
			pci_dbg(pdev,
				"Aliasing Partition %d Proxy ID %02x.%d\n",
				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
			pci_add_dma_alias(pdev, devfn);
		}
	}

	pci_iounmap(pdev, mmio);
	pci_disable_device(pdev);
}
/* Apply to all known Switchtec NTB-capable parts (bridge-other class) */
#define SWITCHTEC_QUIRK(vid) \
	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)

SWITCHTEC_QUIRK(0x8531);
SWITCHTEC_QUIRK(0x8532);
SWITCHTEC_QUIRK(0x8533);
SWITCHTEC_QUIRK(0x8534);
SWITCHTEC_QUIRK(0x8535);
SWITCHTEC_QUIRK(0x8536);
SWITCHTEC_QUIRK(0x8541);
SWITCHTEC_QUIRK(0x8542);
SWITCHTEC_QUIRK(0x8543);
SWITCHTEC_QUIRK(0x8544);
SWITCHTEC_QUIRK(0x8545);
SWITCHTEC_QUIRK(0x8546);
SWITCHTEC_QUIRK(0x8551);
SWITCHTEC_QUIRK(0x8552);
SWITCHTEC_QUIRK(0x8553);
SWITCHTEC_QUIRK(0x8554);
SWITCHTEC_QUIRK(0x8555);
SWITCHTEC_QUIRK(0x8556);
SWITCHTEC_QUIRK(0x8561);
SWITCHTEC_QUIRK(0x8562);
SWITCHTEC_QUIRK(0x8563);
SWITCHTEC_QUIRK(0x8564);
SWITCHTEC_QUIRK(0x8565);
SWITCHTEC_QUIRK(0x8566);
SWITCHTEC_QUIRK(0x8571);
SWITCHTEC_QUIRK(0x8572);
SWITCHTEC_QUIRK(0x8573);
SWITCHTEC_QUIRK(0x8574);
SWITCHTEC_QUIRK(0x8575);
SWITCHTEC_QUIRK(0x8576);
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
/*
 * On the Lenovo ThinkPad P50 (subsystem 17aa:222e), the EFI firmware may
 * leave the NVIDIA GPU initialized; if the MMIO register at 0x2240c has
 * bit 1 set, perform a bus reset so the GPU starts from a clean state.
 * Requires that the device supports a reset (pdev->reset_fn).
 */
static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
{
	void __iomem *map;
	int ret;

	if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
	    pdev->subsystem_device != 0x222e ||
	    !pdev->reset_fn)
		return;

	if (pci_enable_device_mem(pdev))
		return;

	/* Map just enough of BAR 0 to reach the status register */
	map = pci_iomap(pdev, 0, 0x23000);
	if (!map) {
		pci_err(pdev, "Can't map MMIO space\n");
		goto out_disable;
	}

	/* Bit 1 set here indicates firmware left the GPU initialized */
	if (ioread32(map + 0x2240c) & 0x2) {
		pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
		ret = pci_reset_bus(pdev);
		if (ret < 0)
			pci_err(pdev, "Failed to reset GPU: %d\n", ret);
	}

	iounmap(map);
out_disable:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
			      PCI_CLASS_DISPLAY_VGA, 8,
			      quirk_reset_lenovo_thinkpad_p50_nvgpu);
5271