1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/export.h>
17#include <linux/pci.h>
18#include <linux/init.h>
19#include <linux/delay.h>
20#include <linux/acpi.h>
21#include <linux/kallsyms.h>
22#include <linux/dmi.h>
23#include <linux/pci-aspm.h>
24#include <linux/ioport.h>
25#include <linux/sched.h>
26#include <linux/ktime.h>
27#include <linux/mm.h>
28#include <asm/dma.h>
29#include "pci.h"
30
31
32
33
34
35
36
/*
 * Mark host bridges so their MMIO decoding is never turned off (e.g. during
 * BAR sizing); disabling decode on a host bridge can hang the system.
 */
static void quirk_mmio_always_on(struct pci_dev *dev)
{
	dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
43
44
45
46
47
/*
 * The Mellanox Tavor device reports false positive parity errors; flag it
 * as having broken parity status so those errors are ignored.
 */
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
	dev->broken_parity_status = 1;	/* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
54
55
56
/*
 * Deal with broken BIOSes that neglect to enable passive release,
 * which can cause problems in combination with the 82441FX/PPro MTRRs.
 * Triggered on the 82441 (Natoma) host bridge; walks every PIIX3
 * (82371SB function 0) in the system and sets bit 1 of config reg 0x82.
 */
static void quirk_passive_release(struct pci_dev *dev)
{
	struct pci_dev *d = NULL;
	unsigned char dlc;

	/* We have to make sure a particular bit is set in the PIIX3
	   ISA bridge, so we have to go out and find it. */
	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
		pci_read_config_byte(d, 0x82, &dlc);
		if (!(dlc & 1<<1)) {
			dev_info(&d->dev, "PIIX3: Enabling Passive Release\n");
			dlc |= 1<<1;
			pci_write_config_byte(d, 0x82, dlc);
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
75
76
77
78
79
80
81
82
83static void quirk_isa_dma_hangs(struct pci_dev *dev)
84{
85 if (!isa_dma_bridge_buggy) {
86 isa_dma_bridge_buggy = 1;
87 dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
88 }
89}
90
91
92
93
94DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
95DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
96DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
97DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
98DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
99DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
100DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
101
102
103
104
105
/*
 * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear for C3 entry.
 * Reads the PM base from config reg 0x40, then clears BM_STS (bit 4 of the
 * PM1a status port) by writing it back (status bits are write-1-to-clear).
 */
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
	u32 pmbase;
	u16 pm1a;

	pci_read_config_dword(dev, 0x40, &pmbase);
	pmbase = pmbase & 0xff80;	/* mask off enable/reserved low bits */
	pm1a = inw(pmbase);

	if (pm1a & 0x10) {
		dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
		outw(0x10, pmbase);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
121
122
123
124
125static void quirk_nopcipci(struct pci_dev *dev)
126{
127 if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
128 dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
129 pci_pci_problems |= PCIPCI_FAIL;
130 }
131}
132DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
133DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
134
/*
 * AMD 8151 AGP bridge, revision 0x13 only: direct PCI/AGP transfers are
 * broken (chipset erratum), so flag PCIAGP_FAIL.
 */
static void quirk_nopciamd(struct pci_dev *dev)
{
	u8 rev;
	pci_read_config_byte(dev, 0x08, &rev);
	if (rev == 0x13) {
		/* Erratum 24 */
		dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
		pci_pci_problems |= PCIAGP_FAIL;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
146
147
148
149
150static void quirk_triton(struct pci_dev *dev)
151{
152 if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
153 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
154 pci_pci_problems |= PCIPCI_TRITON;
155 }
156}
157DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
158DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
159DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
160DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
161
162
163
164
165
166
167
168
169
170
171
/*
 * VIA Apollo KT133/KX133 northbridge + buggy southbridge combination:
 * apply a bus-arbitration workaround, but only when a VIA 82C686 rev
 * 0x40-0x42 or a VIA 8231 rev 0x10-0x12 southbridge is present.
 * Rewrites northbridge reg 0x76: clear bit 5, set bit 4.
 */
static void quirk_vialatency(struct pci_dev *dev)
{
	struct pci_dev *p;
	u8 busarb;

	/* Ok, we have a potential problem chipset here. Now see if we have
	   a buggy southbridge */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
	if (p != NULL) {

		/* Only the affected 82C686 revisions need the fix */
		if (p->revision < 0x40 || p->revision > 0x42)
			goto exit;
	} else {
		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
		if (p == NULL)	/* No problem parts */
			goto exit;

		/* Only the affected 8231 revisions need the fix */
		if (p->revision < 0x10 || p->revision > 0x12)
			goto exit;
	}

	/*
	 * Ok we have the problem. Now set the PCI master grant to
	 * occur every master grant. The apparent bug is that under high
	 * PCI load (quite common in Linux of course) you can get data
	 * loss when the CPU is held off the bus for 3 bus master requests.
	 * This happens to include the IDE controllers....
	 */
	pci_read_config_byte(dev, 0x76, &busarb);

	/* Set bit 4 and bit 5 of byte 76 to 0x01 */
	busarb &= ~(1<<5);
	busarb |= (1<<4);
	pci_write_config_byte(dev, 0x76, busarb);
	dev_info(&dev->dev, "Applying VIA southbridge workaround\n");
exit:
	pci_dev_put(p);	/* drop the reference taken by pci_get_device() */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
224
225
226
227
228static void quirk_viaetbf(struct pci_dev *dev)
229{
230 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
231 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
232 pci_pci_problems |= PCIPCI_VIAETBF;
233 }
234}
235DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
236
237static void quirk_vsfx(struct pci_dev *dev)
238{
239 if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
240 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
241 pci_pci_problems |= PCIPCI_VSFX;
242 }
243}
244DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
245
246
247
248
249
250
251
252static void quirk_alimagik(struct pci_dev *dev)
253{
254 if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
255 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
256 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
257 }
258}
259DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
260DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
261
262
263
264
265
266static void quirk_natoma(struct pci_dev *dev)
267{
268 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
269 dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
270 pci_pci_problems |= PCIPCI_NATOMA;
271 }
272}
273DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
274DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
275DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
276DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
277DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
278DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
279
280
281
282
283
/*
 * IBM Citrine: limit config space accesses to the first 0xA0 bytes —
 * the device does not tolerate reads beyond that offset.
 */
static void quirk_citrine(struct pci_dev *dev)
{
	dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
289
290
291
292
293
/*
 * Netronome NFP4000/NFP6000: limit config space size to 0x600 bytes —
 * accesses past that offset misbehave on these parts.
 */
static void quirk_nfp6000(struct pci_dev *dev)
{
	dev->cfg_size = 0x600;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
301
302
303static void quirk_extend_bar_to_page(struct pci_dev *dev)
304{
305 int i;
306
307 for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
308 struct resource *r = &dev->resource[i];
309
310 if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
311 r->end = PAGE_SIZE - 1;
312 r->start = 0;
313 r->flags |= IORESOURCE_UNSET;
314 dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
315 i, r);
316 }
317 }
318}
319DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
320
321
322
323
324
325static void quirk_s3_64M(struct pci_dev *dev)
326{
327 struct resource *r = &dev->resource[0];
328
329 if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
330 r->flags |= IORESOURCE_UNSET;
331 r->start = 0;
332 r->end = 0x3ffffff;
333 }
334}
335DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
336DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
337
338static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
339 const char *name)
340{
341 u32 region;
342 struct pci_bus_region bus_region;
343 struct resource *res = dev->resource + pos;
344
345 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion);
346
347 if (!region)
348 return;
349
350 res->name = pci_name(dev);
351 res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
352 res->flags |=
353 (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
354 region &= ~(size - 1);
355
356
357 bus_region.start = region;
358 bus_region.end = region + size - 1;
359 pcibios_bus_to_resource(dev->bus, res, &bus_region);
360
361 dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
362 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
363}
364
365
366
367
368
369
370
371
372
373
/*
 * AMD CS5536 ISA bridge: some BIOSes publish wrong BAR sizes in the
 * header.  If BAR 0 is not the expected 8 bytes, re-enter BARs 0-2
 * with their true sizes via quirk_io().
 */
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
	static char *name = "CS5536 ISA bridge";

	if (pci_resource_len(dev, 0) != 8) {
		quirk_io(dev, 0,   8, name);	/* SMB */
		quirk_io(dev, 1, 256, name);	/* GPIO */
		quirk_io(dev, 2,  64, name);	/* MFGPT */
		dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
			 name);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
387
388static void quirk_io_region(struct pci_dev *dev, int port,
389 unsigned size, int nr, const char *name)
390{
391 u16 region;
392 struct pci_bus_region bus_region;
393 struct resource *res = dev->resource + nr;
394
395 pci_read_config_word(dev, port, ®ion);
396 region &= ~(size - 1);
397
398 if (!region)
399 return;
400
401 res->name = pci_name(dev);
402 res->flags = IORESOURCE_IO;
403
404
405 bus_region.start = region;
406 bus_region.end = region + size - 1;
407 pcibios_bus_to_resource(dev->bus, res, &bus_region);
408
409 if (!pci_claim_resource(dev, nr))
410 dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
411}
412
413
414
415
416
/*
 * ATI RS100 northbridge: reserve legacy VGA I/O ports 0x3b0-0x3bb and
 * 0x3d3 so nothing else pokes them (the message says "0x3b0 to 0x3bb"
 * but 0x3d3 is reserved as well).  request_region() failures are
 * deliberately ignored: this is best-effort reservation.
 */
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
	dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	/* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */
	request_region(0x3b0, 0x0C, "RadeonIGP");
	request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
425
426
427
428
429
430
431
432
433
434
435
/*
 * AMD "NL" USB controller: override the PCI class to
 * PCI_CLASS_SERIAL_USB_DEVICE so the dwc3 driver binds instead of xhci
 * (see the log message for the rationale).
 */
static void quirk_amd_nl_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Use "USB Device (not host controller)" class */
	pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
	dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
		quirk_amd_nl_class);
447
448
449
450
451
452
453
454
455
456
457
458
/*
 * ALi M7101: the ACPI (reg 0xE0, 64 bytes) and SMBus (reg 0xE2, 32 bytes)
 * I/O regions are not reported as standard BARs; claim them explicitly.
 */
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
465
/*
 * Log a PIIX4 "device resource" programmable I/O window.  @port is the
 * config dword holding base (low 16 bits) and mask (bits 16-19); the
 * window is active only when all @enable bits are set.  The loop walks
 * the mask down from 16 to find the actual decode size.
 */
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;		/* window not enabled */
	mask = (devres >> 16) & 15;
	base = devres & 0xffff;
	size = 16;
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it, but let's get enough confirmation reports first.
	 */
	base &= -size;	/* align base to the computed size */
	dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base,
		 base + size - 1);
}
492
/*
 * Log a PIIX4 "device resource" programmable memory window.  Same idea
 * as piix4_io_quirk(), but the base lives in the high 16 bits and the
 * mask in bits 0-5 shifted up; sizes start from 128 << 16.
 */
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;		/* window not enabled */
	base = devres & 0xffff0000;
	mask = (devres & 0x3f) << 16;
	size = 128 << 16;
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it (at least if it's in the 0x1000+ range), but
	 * let's get enough confirmation reports first.
	 */
	base &= -size;	/* align base to the computed size */
	dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base,
		 base + size - 1);
}
518
519
520
521
522
523
524
/*
 * PIIX4 ACPI device: claim the hidden ACPI (reg 0x40, 64 bytes) and SMBus
 * (reg 0x90, 16 bytes) I/O regions, then log the programmable "device
 * resource" windows B..J.  Resources E-H are only decoded when the
 * corresponding enable bit (29/30) in resource A (reg 0x5c) is set.
 */
static void quirk_piix4_acpi(struct pci_dev *dev)
{
	u32 res_a;

	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");

	/* Device resource A has enables for some of the other ones */
	pci_read_config_dword(dev, 0x5c, &res_a);

	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);

	/* Device resource D is just bitfields for static resources */

	/* Device 12 enabled? */
	if (res_a & (1 << 29)) {
		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
	}
	/* Device 13 enabled? */
	if (res_a & (1 << 30)) {
		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
	}
	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
555
556#define ICH_PMBASE 0x40
557#define ICH_ACPI_CNTL 0x44
558#define ICH4_ACPI_EN 0x10
559#define ICH6_ACPI_EN 0x80
560#define ICH4_GPIOBASE 0x58
561#define ICH4_GPIO_CNTL 0x5c
562#define ICH4_GPIO_EN 0x10
563#define ICH6_GPIOBASE 0x48
564#define ICH6_GPIO_CNTL 0x4c
565#define ICH6_GPIO_EN 0x10
566
567
568
569
570
571
/*
 * ICH4 (and earlier ICH) LPC bridges: the ACPI/GPIO/TCO and GPIO I/O
 * blocks live behind non-BAR config registers.  Claim each region only
 * when its decode-enable bit is set in the matching control register.
 */
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
	u8 enable;

	/*
	 * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
	 * with low legacy (and fixed) ports. We don't know the decoding
	 * priority and can't tell whether the legacy device or the one created
	 * here is really at that address.  This happens on boards with broken
	 * BIOSes.
	 */
	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH4_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				 "ICH4 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
	if (enable & ICH4_GPIO_EN)
		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
604
/*
 * ICH6+ variant of the ICH4 quirk above: same idea, different GPIO
 * base/control registers and ACPI enable bit.
 */
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
	u8 enable;

	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH6_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				 "ICH6 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
	if (enable & ICH6_GPIO_EN)
		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH6 GPIO");
}
619
/*
 * Log an ICH6 LPC "generic decode" I/O range at config offset @reg.
 * Bit 0 is the enable; bits 2-15 hold the base.  @dynsize selects the
 * fixed decode size to assume (16 vs 128 bytes).  Currently log-only.
 */
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
{
	u32 val;
	u32 size, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;
	base = val & 0xfffc;
	if (dynsize) {
		/*
		 * This is not correct. It is 16, 32 or 64 bytes depending on
		 * register D31:F0:ADh bits 5:4. But this gets us at least
		 * close to what the BIOS decoded.
		 */
		size = 16;
	} else {
		size = 128;
	}
	base &= ~(size-1);

	/* Just print it out for now. We should reserve it after more debugging */
	dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
647
/* ICH6 LPC: claim ACPI/GPIO regions, then log the two generic decodes. */
static void quirk_ich6_lpc(struct pci_dev *dev)
{
	/* Shared ACPI/GPIO decode with all ICH6+ parts */
	ich6_lpc_acpi_gpio(dev);

	/* ICH6-specific generic IO decode */
	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
659
/*
 * Log an ICH7 LPC "generic decode" range at config offset @reg.  Unlike
 * ICH6, the register carries an explicit address mask in bits 23:18
 * (dword-granular), so no size guessing is needed.  Currently log-only.
 */
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
{
	u32 val;
	u32 mask, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;

	/*
	 * IO base in bits 15:2, mask in bits 23:18, both
	 * are dword-based
	 */
	base = val & 0xfffc;
	mask = (val >> 16) & 0xfc;
	mask |= 3;	/* decode always covers the low two address bits */

	/* Just print it out for now. We should reserve it after more debugging */
	dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
682
683
/* ICH7-10 LPC: claim ACPI/GPIO regions, then log the four generic decodes. */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
	/* We share the common ACPI/GPIO decode with ICH6 */
	ich6_lpc_acpi_gpio(dev);

	/* And have 4 ICH7+ generic decodes */
	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
708
709
710
711
712
/*
 * VIA 82C586-B ACPI: claim the 256-byte ACPI I/O region at config reg
 * 0x48 — but only on revisions with bit 4 of dev->revision set (the
 * quirk only applies to certain steppings).
 */
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
	if (dev->revision & 0x10)
		quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
				"vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
720
721
722
723
724
725
726
/*
 * VIA 82C686: same ACPI region as the 82C586, plus hardware-monitor
 * (reg 0x70, 128 bytes) and SMBus (reg 0x90, 16 bytes) I/O regions.
 */
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
	quirk_vt82c586_acpi(dev);

	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
				"vt82c686 HW-mon");

	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
737
738
739
740
741
742
/*
 * VIA VT8235: claim the power-management (reg 0x88, 128 bytes) and SMBus
 * (reg 0xd0, 16 bytes) I/O regions.
 */
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
749
750
751
752
753
/*
 * TI XIO2000A bridge: disable fast back-to-back transfers on every device
 * on its secondary bus by clearing PCI_COMMAND_FAST_BACK where set.
 */
static void quirk_xio2000a(struct pci_dev *dev)
{
	struct pci_dev *pdev;
	u16 command;

	dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
		pci_read_config_word(pdev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_FAST_BACK)
			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
			quirk_xio2000a);
768
769#ifdef CONFIG_X86_IO_APIC
770
771#include <asm/io_apic.h>
772
773
774
775
776
777
778
779
780static void quirk_via_ioapic(struct pci_dev *dev)
781{
782 u8 tmp;
783
784 if (nr_ioapics < 1)
785 tmp = 0;
786 else
787 tmp = 0x1f;
788
789 dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
790 tmp == 0 ? "Disa" : "Ena");
791
792
793 pci_write_config_byte(dev, 0x58, tmp);
794}
795DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
796DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
797
798
799
800
801
802
803
/*
 * VIA 8237: set the "bypass APIC de-assert message" bit (bit 3 of config
 * reg 0x5B) if it is not already set.
 */
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
	u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8

	pci_read_config_byte(dev, 0x5B, &misc_control2);
	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
		dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
817
818
819
820
821
822
823
824
825
826
/*
 * AMD Viper 7410 I/O APIC, revisions >= 0x02: warn about AMD Erratum #22
 * and suggest booting with "noapic" if instability is seen.
 */
static void quirk_amd_ioapic(struct pci_dev *dev)
{
	if (dev->revision >= 0x02) {
		dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
		dev_warn(&dev->dev, "        : booting with the \"noapic\" option\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
835#endif
836
837#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
838
/*
 * Cavium 0xa018 with subsystem 0xa118 (RNM): point the SR-IOV "link"
 * value at the device's own devfn.
 */
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
	/* Fix-up for the only RNM function on the ThunderX */
	if (dev->subsystem_device == 0xa118)
		dev->sriov->link = dev->devfn;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
846#endif
847
848
849
850
851
/*
 * AMD 8131 PCI-X bridge, revisions <= 0x12: changing MMRBC is unsafe, so
 * mark the secondary bus with PCI_BUS_FLAGS_NO_MMRBC.
 */
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
	if (dev->subordinate && dev->revision <= 0x12) {
		dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
			 dev->revision);
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
861
862
863
864
865
866
867
868
869
/*
 * VIA ACPI device: read the SCI IRQ from config reg 0x42 (low nibble)
 * and use it as the device IRQ, ignoring 0 and the invalid value 2.
 */
static void quirk_via_acpi(struct pci_dev *d)
{
	/*
	 * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
	 */
	u8 irq;
	pci_read_config_byte(d, 0x42, &irq);
	irq &= 0xf;
	if (irq && (irq != 2))
		d->irq = irq;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
883
884
885
886
887
888
/*
 * VLink slot-number window on bus 0 for the VIA VLink IRQ quirk below.
 * lo == -1 means "no VIA southbridge found, quirk disabled".
 */
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;

/* Record which device slots host VLink functions, keyed by bridge model. */
static void quirk_via_bridge(struct pci_dev *dev)
{
	/* See what bridge we have and find the device ranges */
	switch (dev->device) {
	case PCI_DEVICE_ID_VIA_82C686:
		/*
		 * The VT82C686 is special; it attaches to PCI and can have
		 * any device number. All its subdevices are functions of
		 * that single device.
		 */
		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
		break;
	case PCI_DEVICE_ID_VIA_8237:
	case PCI_DEVICE_ID_VIA_8237A:
		via_vlink_dev_lo = 15;
		break;
	case PCI_DEVICE_ID_VIA_8235:
		via_vlink_dev_lo = 16;
		break;
	case PCI_DEVICE_ID_VIA_8231:
	case PCI_DEVICE_ID_VIA_8233_0:
	case PCI_DEVICE_ID_VIA_8233A:
	case PCI_DEVICE_ID_VIA_8233C_0:
		via_vlink_dev_lo = 17;
		break;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
925
926
927
928
929
930
931
932
933
934
935
936
937
938
/*
 * On VIA VLink devices (bus 0, slot within [via_vlink_dev_lo,
 * via_vlink_dev_hi] as recorded by quirk_via_bridge), write the kernel's
 * chosen legacy IRQ back into PCI_INTERRUPT_LINE when they disagree.
 * Only IRQs 1-15 are candidates.
 */
static void quirk_via_vlink(struct pci_dev *dev)
{
	u8 irq, new_irq;

	/* Check if we have VLink and the quirk is enabled at all */
	if (via_vlink_dev_lo == -1)
		return;

	new_irq = dev->irq;

	/* Don't quirk interrupts outside the legacy IRQ range */
	if (!new_irq || new_irq > 15)
		return;

	/* Internal device ? */
	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
		return;

	/*
	 * This is an internal VLink device on a PIC interrupt. The BIOS
	 * ought to have set this but may not have, so we redo it.
	 */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	if (new_irq != irq) {
		dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n",
			irq, new_irq);
		udelay(15);	/* unknown if delay really needed */
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
970
971
972
973
974
975
976
/*
 * VIA VT82C598: writing 0 to config reg 0xfc changes which device ID
 * the chip reports; re-read PCI_DEVICE_ID afterwards so dev->device
 * holds the real ID.
 */
static void quirk_vt82c598_id(struct pci_dev *dev)
{
	pci_write_config_byte(dev, 0xfc, 0);
	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
983
984
985
986
987
988
989
/*
 * CardBus controllers: zero the legacy-mode base register so legacy I/O
 * decoding stays disabled; redone on resume as firmware may restore it.
 */
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
998
999
1000
1001
1002
1003
1004
1005
/*
 * AMD FE Gate 700C: if bits 1-2 of config reg 0x4C are not both set, the
 * BIOS left the chip out of PCI-compliant ordering mode; set them, and
 * also set bit 23 of reg 0x84.
 */
static void quirk_amd_ordering(struct pci_dev *dev)
{
	u32 pcic;
	pci_read_config_dword(dev, 0x4C, &pcic);
	if ((pcic & 6) != 6) {
		pcic |= 6;
		dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
		pci_write_config_dword(dev, 0x4C, pcic);
		pci_read_config_dword(dev, 0x84, &pcic);
		pcic |= (1 << 23);	/* required companion bit */
		pci_write_config_dword(dev, 0x84, pcic);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1021
1022
1023
1024
1025
1026
1027
1028
/*
 * Dunord I-3000: BAR 1 actually decodes 16MB; mark the resource unset
 * and size it 0..0xffffff so the core allocates the full range.
 */
static void quirk_dunord(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[1];

	r->flags |= IORESOURCE_UNSET;
	r->start = 0;
	r->end = 0xffffff;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1038
1039
1040
1041
1042
1043
1044
/*
 * Bridges that forward transactions for addresses they don't claim
 * ("subtractive decode"): mark them transparent so resource allocation
 * accounts for it.
 */
static void quirk_transparent_bridge(struct pci_dev *dev)
{
	dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1051
1052
1053
1054
1055
1056
1057
1058static void quirk_mediagx_master(struct pci_dev *dev)
1059{
1060 u8 reg;
1061
1062 pci_read_config_byte(dev, 0x41, ®);
1063 if (reg & 2) {
1064 reg &= ~2;
1065 dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1066 reg);
1067 pci_write_config_byte(dev, 0x41, reg);
1068 }
1069}
1070DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1071DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1072
1073
1074
1075
1076
1077
/*
 * Intel 450NX (82454NX) revision 0x04 ("C0") only: clear bit 6 of config
 * reg 0x40 to disable PCI restreaming.
 */
static void quirk_disable_pxb(struct pci_dev *pdev)
{
	u16 config;

	if (pdev->revision != 0x04)		/* Only C0 requires this */
		return;
	pci_read_config_word(pdev, 0x40, &config);
	if (config & (1<<6)) {
		config &= ~(1<<6);
		pci_write_config_word(pdev, 0x40, config);
		dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1093
/*
 * AMD/ATI SB600+ SATA left in IDE mode by the BIOS: unlock the class
 * register (reg 0x40 bit 0), rewrite the class/prog-if bytes to AHCI,
 * relock, and update pdev->class accordingly.
 */
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
	/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
	u8 tmp;

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
	if (tmp == 0x01) {			/* currently IDE class */
		pci_read_config_byte(pdev, 0x40, &tmp);
		pci_write_config_byte(pdev, 0x40, tmp|1);	/* unlock */
		pci_write_config_byte(pdev, 0x9, 1);		/* prog-if: AHCI */
		pci_write_config_byte(pdev, 0xa, 6);		/* subclass: SATA */
		pci_write_config_byte(pdev, 0x40, tmp);		/* relock */

		pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
		dev_info(&pdev->dev, "set SATA to AHCI mode\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1119
1120
1121
1122
/*
 * ServerWorks CSB5 IDE: force compatibility (legacy) mode by clearing
 * the native-mode bits (0 and 2) of the programming interface, both in
 * config space and in the cached pdev->class.
 */
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
	u8 prog;
	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
	if (prog & 5) {
		prog &= ~5;
		pdev->class &= ~5;
		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
		/* PCI layer will sort out resources */
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1135
1136
1137
1138
/*
 * Intel 82801CA IDE: if the two channels are in different modes (one
 * native, one legacy — prog-if bits 0 and 2 disagree), force both to
 * legacy mode, updating config space and the cached class.
 */
static void quirk_ide_samemode(struct pci_dev *pdev)
{
	u8 prog;

	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);

	if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
		dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n");
		prog &= ~5;
		pdev->class &= ~5;
		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1153
1154
1155
1156
1157
/*
 * Some ATA devices break if put into D3; flag them so power management
 * never puts them there.
 */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
/* Quirk the legacy ATA devices only. The AHCI ones are ok */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* ALi loses some register settings that we cannot then restore */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
   occur when mode detecting */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1174
1175
1176
1177
1178static void quirk_eisa_bridge(struct pci_dev *dev)
1179{
1180 dev->class = PCI_CLASS_BRIDGE_EISA << 8;
1181}
1182DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
/* Set while probing a host bridge whose (device, subsystem vendor,
 * subsystem device) triple is known to hide the Intel SMBus controller;
 * consumed by the asus_hides_smbus_lpc* fixups below. */
static int asus_hides_smbus;

/*
 * Detect machines whose BIOS hides the SMBus controller behind the LPC
 * bridge.  Matching is by host-bridge device ID plus the board's
 * subsystem IDs; the switch cases deliberately share the single
 * "asus_hides_smbus = 1" statement (no break needed — any matched case
 * sets the flag, any unmatched subsystem ID falls out of the switch).
 */
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
	/* ASUS boards, keyed by the Intel host bridge they carry. */
	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
			switch (dev->subsystem_device) {
			case 0x8025:
			case 0x8070:
			case 0x8088:
			case 0x1626:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
			switch (dev->subsystem_device) {
			case 0x80b1:
			case 0x80b2:
			case 0x8093:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
			switch (dev->subsystem_device) {
			case 0x8030:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
			switch (dev->subsystem_device) {
			case 0x8070:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
			switch (dev->subsystem_device) {
			case 0x80c9:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
			switch (dev->subsystem_device) {
			case 0x1751:
			case 0x1821:
			case 0x1897:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x184b:
			case 0x186a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x80f2:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
			switch (dev->subsystem_device) {
			case 0x1882:
			case 0x1977:
				asus_hides_smbus = 1;
			}
	/* HP boards. */
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x088C:
			case 0x0890:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x12bc:
			case 0x12bd:
			case 0x006a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
			switch (dev->subsystem_device) {
			case 0x12bf:
				asus_hides_smbus = 1;
			}
	/* Samsung boards. */
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0xC00C:
				asus_hides_smbus = 1;
			}
	/* Compaq boards. */
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x0058:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
			switch (dev->subsystem_device) {
			case 0xB16C:
				/* NOTE(review): single model-specific entry;
				 * original rationale comment was lost. */
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
			switch (dev->subsystem_device) {
			case 0x00b8:
			case 0x00b9:
			case 0x00ba:
				/* NOTE(review): matched on the 82801DB device
				 * rather than the host bridge — intentional,
				 * but original explanation was lost. */
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
			switch (dev->subsystem_device) {
			case 0x001A:
				asus_hides_smbus = 1;
			}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1344
1345static void asus_hides_smbus_lpc(struct pci_dev *dev)
1346{
1347 u16 val;
1348
1349 if (likely(!asus_hides_smbus))
1350 return;
1351
1352 pci_read_config_word(dev, 0xF2, &val);
1353 if (val & 0x8) {
1354 pci_write_config_word(dev, 0xF2, val & (~0x8));
1355 pci_read_config_word(dev, 0xF2, &val);
1356 if (val & 0x8)
1357 dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1358 val);
1359 else
1360 dev_info(&dev->dev, "Enabled i801 SMBus device\n");
1361 }
1362}
1363DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1364DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1365DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1366DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1367DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1368DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1369DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1370DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1371DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1372DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1373DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1374DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1375DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1376DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1377
1378
/* RCBA (Root Complex Base Address) mapping created at suspend time and
 * used/torn down by the resume fixups below. */
static void __iomem *asus_rcba_base;

/* Suspend hook: map the ICH6 RCBA region (read from LPC config offset
 * 0xF0) so the resume-early hook can poke it before config space is
 * usable for MMIO setup. */
static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
	u32 rcba;

	if (likely(!asus_hides_smbus))
		return;
	/* The resume hook should have unmapped any previous mapping. */
	WARN_ON(asus_rcba_base);

	pci_read_config_dword(dev, 0xF0, &rcba);
	/* RCBA is 16 kB aligned (bits 31:14); map the full 16 kB window. */
	asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
	if (asus_rcba_base == NULL)
		return;
}

/* Resume-early hook: clear bit 3 of the register at RCBA+0x3418 —
 * NOTE(review): presumably the function-disable bit hiding the SMBus
 * device; original explanation was lost. */
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
	u32 val;

	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	val = readl(asus_rcba_base + 0x3418);
	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
}

/* Resume hook: tear down the mapping made at suspend time. */
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;
	iounmap(asus_rcba_base);
	asus_rcba_base = NULL;
	dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
}

/* Boot-time fixup: run the full suspend/resume sequence once so the
 * device is unhidden at initial enumeration as well. */
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
	asus_hides_smbus_lpc_ich6_suspend(dev);
	asus_hides_smbus_lpc_ich6_resume_early(dev);
	asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1425
1426
1427
1428
1429static void quirk_sis_96x_smbus(struct pci_dev *dev)
1430{
1431 u8 val = 0;
1432 pci_read_config_byte(dev, 0x77, &val);
1433 if (val & 0x10) {
1434 dev_info(&dev->dev, "Enabling SiS 96x SMBus\n");
1435 pci_write_config_byte(dev, 0x77, val & ~0x10);
1436 }
1437}
1438DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1439DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1440DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1442DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1443DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1444DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1445DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455#define SIS_DETECT_REGISTER 0x40
1456
1457static void quirk_sis_503(struct pci_dev *dev)
1458{
1459 u8 reg;
1460 u16 devid;
1461
1462 pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®);
1463 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1464 pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1465 if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
1466 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1467 return;
1468 }
1469
1470
1471
1472
1473
1474
1475 dev->device = devid;
1476 quirk_sis_96x_smbus(dev);
1477}
1478DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1479DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1480
1481
1482
1483
1484
1485
1486
1487
1488static void asus_hides_ac97_lpc(struct pci_dev *dev)
1489{
1490 u8 val;
1491 int asus_hides_ac97 = 0;
1492
1493 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1494 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1495 asus_hides_ac97 = 1;
1496 }
1497
1498 if (!asus_hides_ac97)
1499 return;
1500
1501 pci_read_config_byte(dev, 0x50, &val);
1502 if (val & 0xc0) {
1503 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1504 pci_read_config_byte(dev, 0x50, &val);
1505 if (val & 0xc0)
1506 dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1507 val);
1508 else
1509 dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
1510 }
1511}
1512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1513DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1514
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)

/*
 * Reprogram the JMicron AHCI/IDE combo controllers' mode-control words
 * (config dwords 0x40 and 0x80) to a known-good routing for each chip,
 * then re-read the header type and class since the write can change how
 * the functions present themselves.
 * NOTE(review): the exact meaning of the 0x40/0x80 bit patterns is not
 * derivable from this file — treat the constants as chip-specific magic.
 */
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
	u32 conf1, conf5, class;
	u8 hdr;

	/* Only poke function 0; the settings are chip-wide. */
	if (PCI_FUNC(pdev->devfn))
		return;

	pci_read_config_dword(pdev, 0x40, &conf1);
	pci_read_config_dword(pdev, 0x80, &conf5);

	conf1 &= ~0x00CFF302;	/* clear the mode bits we manage */
	conf5 &= ~(1 << 24);

	switch (pdev->device) {
	case PCI_DEVICE_ID_JMICRON_JMB360:
	case PCI_DEVICE_ID_JMICRON_JMB362:
	case PCI_DEVICE_ID_JMICRON_JMB364:
		/* SATA-only parts. */
		conf1 |= 0x0002A100;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB365:
	case PCI_DEVICE_ID_JMICRON_JMB366:
		/* Dual-PATA parts: set the extra bit, then share the
		 * combined-controller setup below. */
		conf5 |= (1 << 24);
		/* fall through */
	case PCI_DEVICE_ID_JMICRON_JMB361:
	case PCI_DEVICE_ID_JMICRON_JMB363:
	case PCI_DEVICE_ID_JMICRON_JMB369:
		/* SATA+PATA combo parts. */
		conf1 |= 0x00C2A1B3;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB368:
		/* PATA-only part. */
		conf1 |= 0x00C00000;
		break;
	}

	pci_write_config_dword(pdev, 0x40, conf1);
	pci_write_config_dword(pdev, 0x80, conf5);

	/* The write above can change multifunction/class; refresh caches. */
	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
	pdev->hdr_type = hdr & 0x7f;
	pdev->multifunction = !!(hdr & 0x80);

	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
	pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);

#endif
1595
1596static void quirk_jmicron_async_suspend(struct pci_dev *dev)
1597{
1598 if (dev->multifunction) {
1599 device_disable_async_suspend(&dev->dev);
1600 dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
1601 }
1602}
1603DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
1604DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
1605DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
1606DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
1607
#ifdef CONFIG_X86_IO_APIC
/*
 * On the Intel EESSC ("Alder") part only the first BAR is real; claim it
 * in the iomem tree and wipe the remaining resources so nothing else
 * tries to use them.
 */
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
	int i;

	/* Only the version with class code 0xff00 needs this. */
	if ((pdev->class >> 8) != 0xff00)
		return;

	/* Claim BAR 0 (which holds the IOAPIC, per the quirk's name —
	 * TODO confirm) so it is reserved in the resource tree. */
	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
		insert_resource(&iomem_resource, &pdev->resource[0]);

	/* The remaining BARs are bogus; clear them. */
	for (i = 1; i < 6; i++)
		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif

/* MSI is broken on these MCH parts; force legacy interrupts. */
static void quirk_pcie_mch(struct pci_dev *pdev)
{
	pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
1638
1639
1640
1641
1642
1643
/* PXH bridges: disable MSI (the warning names the SHPC device as the
 * reason callers should know about). */
static void quirk_pcie_pxh(struct pci_dev *dev)
{
	dev->no_msi = 1;
	dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);

/*
 * These Intel PCIe ports need a longer D3 transition delay (raised
 * globally to 120 — presumably milliseconds, matching pci_pm_d3_delay's
 * units) and must not be put in D1/D2.
 */
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
	pci_pm_d3_delay = 120;
	dev->no_d1d2 = 1;
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1686
#ifdef CONFIG_X86_IO_APIC
/* DMI callback: disable boot-interrupt rerouting on matched systems. */
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
	noioapicreroute = 1;
	pr_info("%s detected: disable boot interrupt reroute\n", d->ident);

	return 0;
}

/* Systems on which boot-interrupt rerouting must be disabled. */
static struct dmi_system_id boot_interrupt_dmi_table[] = {
	{
		.callback = dmi_disable_ioapicreroute,
		.ident = "ASUSTek Computer INC. M2N-LR",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
		},
	},
	{}
};
1710
1711
1712
1713
1714
1715
1716
/*
 * Mark these Intel bridges so their interrupts are rerouted to the boot
 * interrupt lines, unless disabled by command line (noioapicquirk /
 * noioapicreroute) or by the DMI table above.
 */
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
	dmi_check_system(boot_interrupt_dmi_table);
	if (noioapicquirk || noioapicreroute)
		return;

	dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
	dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
#define INTEL_6300_IOAPIC_ABAR		0x40	/* config reg holding the disable bit */
#define INTEL_6300_DISABLE_BOOT_IRQ	(1<<14)

/* Disable boot interrupt generation on the Intel 6300ESB by setting
 * bit 14 in its ABAR config register (unless noioapicquirk is set). */
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;

	if (noioapicquirk)
		return;

	pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
	pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
	pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);

	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1772
1773
1774
1775
#define BC_HT1000_FEATURE_REG		0x64
#define BC_HT1000_PIC_REGS_ENABLE	(1<<0)
#define BC_HT1000_MAP_IDX		0xC00	/* I/O port: PIC map index */
#define BC_HT1000_MAP_DATA		0xC01	/* I/O port: PIC map data */

/*
 * Disable boot interrupts on the ServerWorks/Broadcom HT1000: expose the
 * PIC mapping registers, zero the map entries for IRQs 0x10-0x2f, then
 * restore the original feature-register value.
 */
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;
	u8 irq;

	if (noioapicquirk)
		return;

	/* Enable access to the PIC map registers at ports 0xC00/0xC01. */
	pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
			       BC_HT1000_PIC_REGS_ENABLE);

	/* Clear the map entry for each of the 32 IRQs starting at 0x10. */
	for (irq = 0x10; irq < 0x10 + 32; irq++) {
		outb(irq, BC_HT1000_MAP_IDX);
		outb(0x00, BC_HT1000_MAP_DATA);
	}

	/* Restore the saved feature-register value. */
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);

	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1805
1806
1807
1808
1809
1810
1811
1812
1813
#define AMD_813X_MISC			0x40
#define AMD_813X_NOIOAMODE		(1<<0)
#define AMD_813X_REV_B1			0x12
#define AMD_813X_REV_B2			0x13

/* Disable boot interrupts on AMD 8131/8132 bridges by clearing the
 * NOIOAMODE bit; revisions B1/B2 are skipped (presumably unaffected —
 * original rationale comment was lost). */
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;

	if (noioapicquirk)
		return;
	if ((dev->revision == AMD_813X_REV_B1) ||
	    (dev->revision == AMD_813X_REV_B2))
		return;

	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
	pci_config_dword &= ~AMD_813X_NOIOAMODE;
	pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);

	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1840
#define AMD_8111_PCI_IRQ_ROUTING	0x56

/* Disable boot interrupts on the AMD 8111 SMBus function by zeroing its
 * PCI IRQ routing register (skipped when already zero). */
static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;

	if (noioapicquirk)
		return;

	pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
	if (!pci_config_word) {
		dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n",
			 dev->vendor, dev->device);
		return;
	}
	pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
#endif
1863
1864
1865
1866
1867
1868
1869static void quirk_tc86c001_ide(struct pci_dev *dev)
1870{
1871 struct resource *r = &dev->resource[0];
1872
1873 if (r->start & 0x8) {
1874 r->flags |= IORESOURCE_UNSET;
1875 r->start = 0;
1876 r->end = 0xf;
1877 }
1878}
1879DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
1880 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
1881 quirk_tc86c001_ide);
1882
1883
1884
1885
1886
1887
1888
1889
/*
 * PLX PCI 9050 (revision < 2) has a bug when a BAR's address has bit 7
 * set: for affected 128-byte BARs, mark the resource unset and grow the
 * requested size to 256 bytes so reassignment lands on a bit-7-clear
 * address.
 */
static void quirk_plx_pci9050(struct pci_dev *dev)
{
	unsigned int bar;

	/* Fixed in revision 2 and later. */
	if (dev->revision >= 2)
		return;
	for (bar = 0; bar <= 1; bar++)
		if (pci_resource_len(dev, bar) == 0x80 &&
		    (pci_resource_start(dev, bar) & 0x80)) {
			struct resource *r = &dev->resource[bar];
			dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
				 bar);
			r->flags |= IORESOURCE_UNSET;
			r->start = 0;
			r->end = 0xff;
		}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
			 quirk_plx_pci9050);
/* Boards using the 9050 with their own vendor/device IDs. */
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
1921
/*
 * NetMos combined parallel/serial cards encode their port counts in the
 * subsystem device ID (high nibble = parallel ports, low nibble = serial
 * ports).  Cards with parallel ports are reclassified from SERIAL to
 * OTHER so the parport_serial driver binds instead of a plain UART
 * driver.
 */
static void quirk_netmos(struct pci_dev *dev)
{
	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
	unsigned int num_serial = dev->subsystem_device & 0xf;

	switch (dev->device) {
	case PCI_DEVICE_ID_NETMOS_9835:
		/* IBM 0299 subsystem: leave the class alone. */
		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
		    dev->subsystem_device == 0x0299)
			return;
		/* fall through */
	case PCI_DEVICE_ID_NETMOS_9735:
	case PCI_DEVICE_ID_NETMOS_9745:
	case PCI_DEVICE_ID_NETMOS_9845:
	case PCI_DEVICE_ID_NETMOS_9855:
		if (num_parallel) {
			dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
				 dev->device, num_parallel, num_serial);
			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
				     (dev->class & 0xff);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
			       PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
1957
1958
1959
1960
1961
1962
/*
 * For non-zero functions of Intel Ethernet multi-function devices whose
 * function 0 has VPD and an identical vendor/device/class, flag the
 * function to read VPD through function 0 (shared VPD hardware).
 */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
	struct pci_dev *f0;

	/* Function 0 itself needs no redirection. */
	if (!PCI_FUNC(dev->devfn))
		return;

	f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	if (!f0)
		return;

	if (f0->vpd && dev->class == f0->class &&
	    dev->vendor == f0->vendor && dev->device == f0->device)
		dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;

	pci_dev_put(f0);	/* drop the reference taken by pci_get_slot() */
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
1982
/*
 * Some firmware leaves e100 NIC interrupts enabled before the driver
 * loads.  For known e100 device IDs, if the part is enabled, mapped and
 * in D0, peek at the CSR and disable interrupts that firmware left on.
 */
static void quirk_e100_interrupt(struct pci_dev *dev)
{
	u16 command, pmcsr;
	u8 __iomem *csr;
	u8 cmd_hi;

	switch (dev->device) {
	/* PCI IDs of e100 variants covered by this quirk. */
	case 0x1029:
	case 0x1030 ... 0x1034:
	case 0x1038 ... 0x103E:
	case 0x1050 ... 0x1057:
	case 0x1059:
	case 0x1064 ... 0x106B:
	case 0x1091 ... 0x1095:
	case 0x1209:
	case 0x1229:
	case 0x2449:
	case 0x2459:
	case 0x245D:
	case 0x27DC:
		break;
	default:
		return;
	}

	/*
	 * Only touch the device if memory decoding is enabled and BAR 0
	 * is assigned — otherwise the ioremap below would be meaningless.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &command);

	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
		return;

	/*
	 * Skip devices not in D0: their MMIO space is not accessible.
	 */
	if (dev->pm_cap) {
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
			return;
	}

	/* Map just the first 8 bytes of the CSR. */
	csr = ioremap(pci_resource_start(dev, 0), 8);
	if (!csr) {
		dev_warn(&dev->dev, "Can't map e100 registers\n");
		return;
	}

	/* Byte 3 of the CSR: 0 means interrupts enabled; write 1 to mask. */
	cmd_hi = readb(csr + 3);
	if (cmd_hi == 0) {
		dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n");
		writeb(1, csr + 3);
	}

	iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
2048
2049
2050
2051
2052
/* Disable ASPM L0s on these Intel NICs (L0s is broken on them). */
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
	dev_info(&dev->dev, "Disabling L0s\n");
	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2072
/*
 * NCR 53c810 rev 1 reports class code 0; set it to SCSI storage so the
 * right driver binds.  Devices that already report a non-zero class are
 * left alone.
 */
static void fixup_rev1_53c810(struct pci_dev *dev)
{
	u32 class = dev->class;

	/* Only fix devices whose class is (incorrectly) zero. */
	if (class)
		return;

	dev->class = PCI_CLASS_STORAGE_SCSI << 8;
	dev_info(&dev->dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
		 class, dev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
2089
2090
2091static void quirk_p64h2_1k_io(struct pci_dev *dev)
2092{
2093 u16 en1k;
2094
2095 pci_read_config_word(dev, 0x40, &en1k);
2096
2097 if (en1k & 0x200) {
2098 dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n");
2099 dev->io_window_1k = 1;
2100 }
2101}
2102DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2103
2104
2105
2106
2107
/*
 * NVIDIA CK804 PCIe port: set bit 5 of config register 0xf41 so the AER
 * extended capability is linked into the capability chain.  Also re-run
 * early on resume, since the bit may be lost across suspend.
 */
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
	uint8_t b;
	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
		if (!(b & 0x20)) {
			pci_write_config_byte(dev, 0xf41, b | 0x20);
			dev_info(&dev->dev, "Linking AER extended capability\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_pcie_aer_ext_cap);
2122
/*
 * VIA CX700 (0x324e): disable PCI bus parking and PCI master read
 * caching, but only on systems that also have a VIA VT6212L USB 2.0
 * controller present (the combination this quirk targets).
 */
static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
					   PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
	uint8_t b;

	/*
	 * p holds the first matching VT6212L; search again starting from it
	 * for a further instance.  pci_get_device() drops the reference on
	 * its 'from' argument, so the first lookup is not leaked here.
	 * NOTE(review): the net effect is "bail unless at least two VT6212L
	 * functions exist" -- confirm that is the intent.
	 */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
	if (!p)
		return;
	pci_dev_put(p);	/* only needed the existence check */

	if (pci_read_config_byte(dev, 0x76, &b) == 0) {
		if (b & 0x40) {
			/* Turn off PCI Bus Parking (bit 6 of register 0x76) */
			pci_write_config_byte(dev, 0x76, b ^ 0x40);

			dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n");
		}
	}

	if (pci_read_config_byte(dev, 0x72, &b) == 0) {
		if (b != 0) {
			/* Turn off PCI Master read caching (register 0x72) */
			pci_write_config_byte(dev, 0x72, 0x0);

			/* Set register 0x75 to 1 (master bus timing) */
			pci_write_config_byte(dev, 0x75, 0x1);

			/* Clear register 0x77 */
			pci_write_config_byte(dev, 0x77, 0x0);

			dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2172
2173
2174
2175
2176
2177
2178
2179
/*
 * Devices whose VPD is in a non-standard format: the size of the VPD area
 * cannot be determined, so reads may hang or return garbage.  Set the
 * advertised VPD length to 0 to disable VPD access entirely.
 */
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
	if (dev->vpd) {
		dev->vpd->len = 0;
		dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
	}
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
		quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
/*
 * Broadcom NetXtreme II 5706/5708 (and 5709 rev. A, i.e. upper revision
 * nibble 0): limit the advertised VPD length to 0x80 bytes -- VPD access
 * beyond that does not work on these parts (hence this quirk).
 */
static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
{
	/*
	 * NOTE(review): the device-ID checks below are redundant with the
	 * DECLARE_PCI_FIXUP_FINAL match list, except that they additionally
	 * restrict the 5709 to revision A (rev & 0xf0 == 0).
	 */
	if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
	    (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
	    (dev->device == PCI_DEVICE_ID_NX2_5708) ||
	    (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
	    ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
	     (dev->revision & 0xf0) == 0x0)) {
		if (dev->vpd)
			dev->vpd->len = 0x80;
	}
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_NX2_5706,
			quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_NX2_5706S,
			quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_NX2_5708,
			quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_NX2_5708S,
			quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_NX2_5709,
			quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_NX2_5709S,
			quirk_brcm_570x_limit_vpd);
2249
2250static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2251{
2252 u32 rev;
2253
2254 pci_read_config_dword(dev, 0xf4, &rev);
2255
2256
2257 if (rev == 0x05719000) {
2258 int readrq = pcie_get_readrq(dev);
2259 if (readrq > 2048)
2260 pcie_set_readrq(dev, 2048);
2261 }
2262}
2263
2264DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2265 PCI_DEVICE_ID_TIGON3_5719,
2266 quirk_brcm_5719_limit_mrrs);
2267
#ifdef CONFIG_PCIE_IPROC_PLATFORM
/*
 * Broadcom PAXC root ports (0x16cd/0x16f0): force the PCI-to-PCI bridge
 * class on the bridge header so the PCI core treats it as a bridge, and
 * cap MPS at 512 bytes (pcie_mpss = 2, i.e. 128 << 2).
 */
static void quirk_paxc_bridge(struct pci_dev *pdev)
{
	/* Only the bridge header needs the class override */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;

	/* Limit Max Payload Size support to 512 bytes */
	pdev->pcie_mpss = 2;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
#endif
2288
2289
2290
2291
2292
2293
2294
2295static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2296{
2297 u8 reg;
2298
2299 if (pci_read_config_byte(dev, 0xF4, ®) == 0 && !(reg & 0x02)) {
2300 dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
2301 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2302 }
2303}
2304
2305DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2306 quirk_unhide_mch_dev6);
2307DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2308 quirk_unhide_mch_dev6);
2309
#ifdef CONFIG_TILEPRO
/*
 * Tilera TILEPro boards: when the tile_plx_gen1 flag is set, force the
 * PLX 8624 switch to Gen 1 operation by writing its config register 0x98,
 * then wait 50ms for the link to retrain.
 */
static void quirk_tile_plx_gen1(struct pci_dev *dev)
{
	if (tile_plx_gen1) {
		pci_write_config_dword(dev, 0x98, 0x1);
		mdelay(50);	/* allow the link to settle after the change */
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
#endif
2328
#ifdef CONFIG_PCI_MSI
/*
 * Some chipsets cannot route or deliver MSIs at all; when one of them is
 * present, disable MSI globally for the whole system.
 */
static void quirk_disable_all_msi(struct pci_dev *dev)
{
	pci_no_msi();
	dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
2349
2350
2351static void quirk_disable_msi(struct pci_dev *dev)
2352{
2353 if (dev->subordinate) {
2354 dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
2355 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2356 }
2357}
2358DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2359DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2360DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2361
2362
2363
2364
2365
2366
2367
/*
 * AMD 780 host bridges (0x9600/0x9601): if the bridge at slot 1 function 0
 * on the same bus is device 0x9602, MSI does not work behind it -- apply
 * the subordinate-MSI-disable quirk to that bridge.
 */
static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
{
	struct pci_dev *apc_bridge;

	apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
	if (apc_bridge) {
		if (apc_bridge->device == 0x9602)
			quirk_disable_msi(apc_bridge);
		pci_dev_put(apc_bridge);	/* balance pci_get_slot() */
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2381
2382
2383
/*
 * Walk the device's HyperTransport MSI mapping capabilities and report the
 * enable state of the first one whose flags byte can be read: returns 1 if
 * its HT_MSI_FLAGS_ENABLE bit is set, 0 otherwise (including when no
 * capability is present).  PCI_FIND_CAP_TTL bounds the walk against
 * malformed capability chains.
 */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
				flags & HT_MSI_FLAGS_ENABLE ?
				"enabled" : "disabled");
			/* first readable mapping capability decides */
			return (flags & HT_MSI_FLAGS_ENABLE) != 0;
		}

		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return 0;
}
2405
2406
2407static void quirk_msi_ht_cap(struct pci_dev *dev)
2408{
2409 if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
2410 dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
2411 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2412 }
2413}
2414DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
2415 quirk_msi_ht_cap);
2416
2417
2418
2419
/*
 * nVidia CK804: the HT MSI mapping may live either on this bridge or on
 * the device at slot 0 of the same bus.  Only if neither has an enabled
 * mapping is MSI disabled on the subordinate bus.
 */
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
	struct pci_dev *pdev;

	if (!dev->subordinate)
		return;

	/* Check the companion device at slot 0 as well */
	pdev = pci_get_slot(dev->bus, 0);
	if (!pdev)
		return;
	if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
		dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
	}
	pci_dev_put(pdev);	/* balance pci_get_slot() */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_msi_ht_cap);
2441
2442
/*
 * Set the enable bit in every HT MSI mapping capability of the device.
 * PCI_FIND_CAP_TTL bounds the walk against malformed capability chains.
 */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			dev_info(&dev->dev, "Enabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags | HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
			 ht_enable_msi_mapping);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
			 ht_enable_msi_mapping);
2468
2469
2470
2471
2472
2473static void nvenet_msi_disable(struct pci_dev *dev)
2474{
2475 const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
2476
2477 if (board_name &&
2478 (strstr(board_name, "P5N32-SLI PREMIUM") ||
2479 strstr(board_name, "P5N32-E SLI"))) {
2480 dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n");
2481 dev->no_msi = 1;
2482 }
2483}
2484DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2485 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2486 nvenet_msi_disable);
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2499{
2500 u32 cfg;
2501
2502 if (!pci_find_capability(dev, PCI_CAP_ID_HT))
2503 return;
2504
2505 pci_read_config_dword(dev, 0x74, &cfg);
2506
2507 if (cfg & ((1 << 2) | (1 << 15))) {
2508 printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
2509 cfg &= ~((1 << 2) | (1 << 15));
2510 pci_write_config_dword(dev, 0x74, cfg);
2511 }
2512}
2513
2514DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2515 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
2516 nvbridge_check_legacy_irq_routing);
2517
2518DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2519 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
2520 nvbridge_check_legacy_irq_routing);
2521
/*
 * Classify the device's HT MSI mapping state:
 *   0 - no HT MSI mapping capability found
 *   1 - mapping capability present, but none enabled
 *   2 - at least one enabled mapping capability found
 */
static int ht_check_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;
	int found = 0;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		/* a capability exists at all */
		if (found < 1)
			found = 1;
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			if (flags & HT_MSI_FLAGS_ENABLE) {
				if (found < 2) {
					found = 2;
					break;	/* enabled mapping: done */
				}
			}
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}

	return found;
}
2549
/*
 * Scan the devices at higher slot numbers than 'host_bridge' on its bus.
 * Return 1 if one of them carries an HT MSI mapping capability before any
 * HT slave device is encountered (the scan stops at the first HT slave),
 * 0 otherwise.
 */
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
	struct pci_dev *dev;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = host_bridge->devfn >> 3;	/* slot number of the bridge */
	for (i = dev_no + 1; i < 0x20; i++) {
		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
		if (!dev)
			continue;

		/* an HT slave device ends the region we care about */
		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			pci_dev_put(dev);
			break;
		}

		if (ht_check_msi_mapping(dev)) {
			found = 1;
			pci_dev_put(dev);
			break;
		}
		pci_dev_put(dev);
	}

	return found;
}
2580
/* Offsets of the two link control registers within the HT slave capability */
#define PCI_HT_CAP_SLAVE_CTRL0     4
#define PCI_HT_CAP_SLAVE_CTRL1     8

/*
 * Return 1 if the device sits at the end of its HyperTransport chain:
 * bit 10 of the capability flags selects which link control register to
 * consult, and bit 6 of that register is the End-of-Chain indication.
 */
static int is_end_of_ht_chain(struct pci_dev *dev)
{
	int pos, ctrl_off;
	int end = 0;
	u16 flags, ctrl;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);

	if (!pos)
		goto out;

	pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);

	ctrl_off = ((flags >> 10) & 1) ?
			PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
	pci_read_config_word(dev, pos + ctrl_off, &ctrl);

	if (ctrl & (1 << 6))
		end = 1;

out:
	return end;
}
2607
/*
 * Enable the device's HT MSI mapping only when it is safe to do so: find
 * the nearest device at the same or a lower slot number on this bus that
 * has an HT slave capability (treated as the host bridge for the chain),
 * and skip enabling if the device is itself that end-of-chain bridge with
 * a mapping-capable leaf behind it, or if the bridge already has an
 * enabled mapping.
 */
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
	struct pci_dev *host_bridge;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = dev->devfn >> 3;	/* slot number of the device */
	for (i = dev_no; i >= 0; i--) {
		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
		if (!host_bridge)
			continue;

		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			found = 1;
			break;	/* keep the reference; released at 'out' */
		}
		pci_dev_put(host_bridge);
	}

	if (!found)
		return;

	/* don't enable end-of-chain bridges that already serve a leaf */
	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
	    host_bridge_with_leaf(host_bridge))
		goto out;

	/* nothing to do if the host bridge's mapping is already enabled */
	if (msi_ht_cap_enabled(host_bridge))
		goto out;

	ht_enable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2646
/*
 * Clear the enable bit in every HT MSI mapping capability of the device.
 * Counterpart of ht_enable_msi_mapping().
 */
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			dev_info(&dev->dev, "Disabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags & ~HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
2666
/*
 * Core of the nVidia/ALi HT MSI mapping quirk.  If the device has an HT
 * MSI mapping capability (ht_check_msi_mapping() != 0), decide based on
 * the 00:00.0 host bridge whether to enable or disable it:
 *   - host bridge is an HT slave: enable the device's mapping if it is
 *     present but disabled ('all' selects unconditional enabling vs the
 *     conservative nv_ht_enable_msi_mapping() path);
 *   - host bridge is not an HT slave: disable the device's mapping unless
 *     it is already disabled.
 * No-op when MSI is disabled system-wide.
 */
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
	struct pci_dev *host_bridge;
	int pos;
	int found;

	if (!pci_msi_enabled())
		return;

	found = ht_check_msi_mapping(dev);

	/* no HT MSI mapping capability -> nothing to do */
	if (found == 0)
		return;

	host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (host_bridge == NULL) {
		dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
		return;
	}

	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
	if (pos != 0) {
		/* HT-slave host bridge: mapping should be enabled */
		if (found == 1) {
			/* mapping present but disabled */
			if (all)
				ht_enable_msi_mapping(dev);
			else
				nv_ht_enable_msi_mapping(dev);
		}
		goto out;
	}

	/* non-HT host bridge: leave alone if already disabled */
	if (found == 1)
		goto out;

	/* mapping enabled where it should not be: turn it off */
	ht_disable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2716
/*
 * Wrappers selecting the aggressive (all=1) or conservative (all=0)
 * variant of __nv_msi_ht_cap_quirk().
 *
 * Fix: 'return <void expression>;' in a function returning void is a
 * constraint violation in ISO C (C11 6.8.6.4p1); call the helper as a
 * plain statement instead.
 */
static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
	__nv_msi_ht_cap_quirk(dev, 1);
}

static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
	__nv_msi_ht_cap_quirk(dev, 0);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2732
/* Mark a device whose MSI delivery breaks when INTx Disable is set. */
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
/*
 * ATI variant: the bug is only present on systems whose SBX00 SMBus
 * controller has a revision in the range [0x30, 0x3B); key off that
 * device's revision rather than the NIC itself.
 */
static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
	struct pci_dev *p;

	p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			   NULL);
	if (!p)
		return;

	if ((p->revision < 0x3B) && (p->revision >= 0x30))
		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
	pci_dev_put(p);	/* balance pci_get_device() */
}
/*
 * QCA/Attansic variant: only chip revisions below 0x18 have the
 * MSI-vs-INTx-Disable bug.
 */
static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
{
	if (dev->revision < 0x18) {
		dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n");
		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715S,
			quirk_msi_intx_disable_bug);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
			quirk_msi_intx_disable_ati_bug);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
			quirk_msi_intx_disable_bug);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
			quirk_msi_intx_disable_qca_bug);
#endif
2822
2823
2824
2825
2826
2827
2828
/*
 * Mark a bridge as a hotplug bridge so the PCI core reserves resources
 * for devices that may be plugged in later.
 */
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
	dev->is_hotplug_bridge = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
#ifdef CONFIG_MMC_RICOH_MMC
/*
 * Disable the proprietary Ricoh MMC controller function so MMC cards are
 * handled by the standard SDHCI controller instead.  The disable bit
 * (bit 1 of register 0xB7) is write-protected; the sequence below opens
 * the write window via registers 0x8E/0x8D, sets the bit, and restores
 * the original protection state.  Only function 0 carries the controls.
 */
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
	u8 write_enable;
	u8 write_target;
	u8 disable;

	/* controls live on function 0 only */
	if (PCI_FUNC(dev->devfn))
		return;

	pci_read_config_byte(dev, 0xB7, &disable);
	if (disable & 0x02)
		return;		/* already disabled */

	/* save protection state, unlock, write, restore -- order matters */
	pci_read_config_byte(dev, 0x8E, &write_enable);
	pci_write_config_byte(dev, 0x8E, 0xAA);
	pci_read_config_byte(dev, 0x8D, &write_target);
	pci_write_config_byte(dev, 0x8D, 0xB7);
	pci_write_config_byte(dev, 0xB7, disable | 0x02);
	pci_write_config_byte(dev, 0x8E, write_enable);
	pci_write_config_byte(dev, 0x8D, write_target);

	dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
	dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
2892
/*
 * R5C832/R5CE822/R5CE823: disable the proprietary Ricoh MMC controller so
 * the standard SDHCI controller handles MMC cards.  On the e822/e823 also
 * reprogram the SD controller base clock (register sequence below) before
 * disabling.  Only function 0 carries the controls.
 */
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
	u8 write_enable;
	u8 disable;

	/* controls live on function 0 only */
	if (PCI_FUNC(dev->devfn))
		return;

	/*
	 * e822/e823 only: unlock (0xf9), flip the clock setting via the
	 * 0xfc/0xe1 sequence, and re-lock.  The exact write order is part
	 * of the hardware unlock protocol -- do not reorder.
	 */
	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
		pci_write_config_byte(dev, 0xf9, 0xfc);
		pci_write_config_byte(dev, 0x150, 0x10);
		pci_write_config_byte(dev, 0xf9, 0x00);
		pci_write_config_byte(dev, 0xfc, 0x01);
		pci_write_config_byte(dev, 0xe1, 0x32);
		pci_write_config_byte(dev, 0xfc, 0x00);

		dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
	}

	pci_read_config_byte(dev, 0xCB, &disable);

	if (disable & 0x02)
		return;		/* already disabled */

	/* unlock via 0xCA, set disable bit, restore lock state */
	pci_read_config_byte(dev, 0xCA, &write_enable);
	pci_write_config_byte(dev, 0xCA, 0x57);
	pci_write_config_byte(dev, 0xCB, disable | 0x02);
	pci_write_config_byte(dev, 0xCA, write_enable);

	dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
	dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif
2947
#ifdef CONFIG_DMAR_TABLE
#define VTUNCERRMSK_REG	0x1ac
/*
 * Fix: 1 << 31 left-shifts into the sign bit of a signed int, which is
 * undefined behavior in ISO C (C11 6.5.7p4); use an unsigned constant.
 * The resulting value is unchanged.
 */
#define VTD_MSK_SPEC_ERRORS	(1U << 31)

/*
 * Mask out the "spec errors" bit in the VT-d uncorrectable error mask
 * register of these Intel chipsets so such errors are not reported.
 */
static void vtd_mask_spec_errors(struct pci_dev *dev)
{
	u32 word;

	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
#endif
2971
/*
 * TI816x (0xb800) reports class code "not defined"; override it with
 * multimedia-video so a driver can bind.
 */
static void fixup_ti816x_class(struct pci_dev *dev)
{
	u32 class = dev->class;

	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
	dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
		 class, dev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
			      PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
2983
2984
2985
2986
/*
 * Cap Max Payload Size support at 256 bytes (pcie_mpss = 1, i.e.
 * 128 << 1) on Solarflare SFC4000 devices.
 */
static void fixup_mpss_256(struct pci_dev *dev)
{
	dev->pcie_mpss = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
2997
2998
2999
3000
3001
3002
3003
3004
/*
 * Intel 5000/5100 series memory controllers: when MPS may be configured
 * above the default, read-completion coalescing (bit 10 of config
 * register 0x48) must be disabled per chipset errata.  Skipped when the
 * PCIe bus configuration is left at TUNE_OFF or DEFAULT, since MPS then
 * stays at values unaffected by the errata.
 */
static void quirk_intel_mc_errata(struct pci_dev *dev)
{
	int err;
	u16 rcc;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return;

	err = pci_read_config_word(dev, 0x48, &rcc);
	if (err) {
		dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n");
		return;
	}

	if (!(rcc & (1 << 10)))
		return;		/* coalescing already off */

	rcc &= ~(1 << 10);

	err = pci_write_config_word(dev, 0x48, rcc);
	if (err) {
		dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n");
		return;
	}

	pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
}
/* Intel 5000 series memory controllers and ports 2-10 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
/* Intel 5100 series memory controllers and ports 2-10 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3064
3065
3066
3067
3068
3069
3070
/*
 * Intel NTB (0x0e08/0x0e0d): the sizes of BAR2 and BAR4 are not encoded
 * in the BARs themselves but as log2 values in config registers 0xD0 and
 * 0xD1; recompute the resource ends from those.
 */
static void quirk_intel_ntb(struct pci_dev *dev)
{
	int rc;
	u8 val;

	rc = pci_read_config_byte(dev, 0x00D0, &val);
	if (rc)
		return;

	/* val is the log2 of the BAR2 size */
	dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;

	rc = pci_read_config_byte(dev, 0x00D1, &val);
	if (rc)
		return;

	/* val is the log2 of the BAR4 size */
	dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3090
/*
 * Log the fixup about to run and, when initcall_debug is set, record the
 * start time so fixup_debug_report() can print its duration.  Returns 0
 * when timing is not being collected.
 */
static ktime_t fixup_debug_start(struct pci_dev *dev,
				 void (*fn)(struct pci_dev *dev))
{
	ktime_t calltime = 0;

	dev_dbg(&dev->dev, "calling %pF\n", fn);
	if (initcall_debug) {
		pr_debug("calling  %pF @ %i for %s\n",
			 fn, task_pid_nr(current), dev_name(&dev->dev));
		calltime = ktime_get();
	}

	return calltime;
}
3105
3106static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
3107 void (*fn)(struct pci_dev *dev))
3108{
3109 ktime_t delta, rettime;
3110 unsigned long long duration;
3111
3112 if (initcall_debug) {
3113 rettime = ktime_get();
3114 delta = ktime_sub(rettime, calltime);
3115 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
3116 pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
3117 fn, duration, dev_name(&dev->dev));
3118 }
3119}
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
/* Display Engine Interrupt Enable Register, relative to BAR 0 */
#define I915_DEIER_REG 0x4400c
/*
 * Some BIOSes hand off the Intel GPU with display-engine interrupts still
 * enabled, which can cause interrupt storms before i915 loads; clear the
 * DEIER register if it is non-zero.
 */
static void disable_igfx_irq(struct pci_dev *dev)
{
	void __iomem *regs = pci_iomap(dev, 0, 0);
	if (regs == NULL) {
		dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
		return;
	}

	/* only disturb the hardware if interrupts were left enabled */
	if (readl(regs + I915_DEIER_REG) != 0) {
		dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n");

		writel(0, regs + I915_DEIER_REG);
	}

	pci_iounmap(dev, regs);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3154
3155
3156
3157
3158
/*
 * Devices that do not need the standard 10ms delay when transitioning
 * out of D3; dropping it speeds up resume.
 */
static void quirk_remove_d3_delay(struct pci_dev *dev)
{
	dev->d3_delay = 0;
}
/* Intel Lynxpoint-LP graphics/audio */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
/* Intel 8-series PCH devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
/* Intel Braswell devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
3189
3190
3191
3192
3193
3194
/*
 * Devices on which writing the INTx Disable bit of the Command register
 * does not actually mask legacy interrupts; record the fact so the core
 * never relies on INTx masking for them.
 */
static void quirk_broken_intx_masking(struct pci_dev *dev)
{
	dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
			quirk_broken_intx_masking);
/* Realtek RTL8169 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
			quirk_broken_intx_masking);
/* Intel XL710/X710 (i40e family) devices */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
			quirk_broken_intx_masking);
3245
/*
 * Mellanox device IDs with unconditionally broken INTx masking,
 * consulted by mellanox_check_broken_intx_masking() below.
 */
static u16 mellanox_broken_intx_devs[] = {
	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
};
3262
#define CONNECTX_4_CURR_MAX_MINOR 99
#define CONNECTX_4_INTX_SUPPORT_MINOR 14

/*
 * Mark Mellanox devices with broken INTx masking.  Older families are
 * listed in mellanox_broken_intx_devs[]; for ConnectX-4/ConnectX-4 Lx the
 * firmware version decides: FW minor > CONNECTX_4_CURR_MAX_MINOR or
 * minor < CONNECTX_4_INTX_SUPPORT_MINOR implies no working INTx masking.
 */
static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
{
	__be32 __iomem *fw_ver;
	u16 fw_major;
	u16 fw_minor;
	u16 fw_subminor;
	u32 fw_maj_min;
	u32 fw_sub_min;
	int i;

	/* Unconditionally broken devices */
	for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
		if (pdev->device == mellanox_broken_intx_devs[i]) {
			pdev->broken_intx_masking = 1;
			return;
		}
	}

	/* Connect-IB is left alone; only ConnectX-4/-4 Lx get the FW probe */
	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
		return;

	if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
	    pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
		return;

	/* Enable memory decoding so the FW version registers are readable */
	if (pci_enable_device_mem(pdev)) {
		dev_warn(&pdev->dev, "Can't enable device memory\n");
		return;
	}

	/*
	 * NOTE(review): only 4 bytes are mapped but fw_ver + 1 below reads
	 * offset 4..7; works in practice due to page-granular ioremap, but
	 * the requested length looks like it should be 8 -- confirm.
	 */
	fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
	if (!fw_ver) {
		dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n");
		goto out;
	}

	/* First two big-endian dwords of BAR0 hold the FW version */
	fw_maj_min = ioread32be(fw_ver);
	fw_sub_min = ioread32be(fw_ver + 1);
	fw_major = fw_maj_min & 0xffff;
	fw_minor = fw_maj_min >> 16;
	fw_subminor = fw_sub_min & 0xffff;
	if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
	    fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
		dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
			 fw_major, fw_minor, fw_subminor, pdev->device ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
		pdev->broken_intx_masking = 1;
	}

	iounmap(fw_ver);

out:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
			mellanox_check_broken_intx_masking);
3332
/* Opt the device out of secondary bus resets (see PCI_DEV_FLAGS_NO_BUS_RESET users) */
static void quirk_no_bus_reset(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
}

/*
 * Atheros wireless devices (0x0030/0x0032/0x003c/0x0033) are flagged here;
 * presumably they do not survive a secondary bus reset -- confirm against
 * the commit history before extending this list.
 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
3349
static void quirk_no_pm_reset(struct pci_dev *dev)
{
	/*
	 * Only mark non-root-bus devices: root-bus devices may still be
	 * reset through other means, and the flag is only meaningful where
	 * a PM (D3hot->D0) transition would otherwise be used as a reset.
	 */
	if (!pci_is_root_bus(dev->bus))
		dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
}

/*
 * ATI/AMD VGA-class devices advertise NoSoftRst- but are not actually
 * reset by a D3hot->D0 transition; marking them avoids a false sense of
 * reset during device assignment.  (NOTE(review): rationale inferred from
 * the quirk's effect -- confirm with the original commit message.)
 */
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3370
3371
3372
3373
3374
3375
3376static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
3377{
3378 if (pdev->is_hotplug_bridge &&
3379 (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
3380 pdev->revision <= 1))
3381 pdev->no_msi = 1;
3382}
3383DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
3384 quirk_thunderbolt_hotplug_msi);
3385DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
3386 quirk_thunderbolt_hotplug_msi);
3387DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
3388 quirk_thunderbolt_hotplug_msi);
3389DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
3390 quirk_thunderbolt_hotplug_msi);
3391DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
3392 quirk_thunderbolt_hotplug_msi);
3393
/*
 * Extend the advertised VPD size to 8k for these Chelsio adapters;
 * presumably their VPD extends beyond the default size computed by the
 * core -- confirm against Chelsio documentation before changing.
 */
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
	pci_set_vpd_size(dev, 8192);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
3412
3413#ifdef CONFIG_ACPI
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
/*
 * Apple: power down the Cactus Ridge Thunderbolt controller before
 * suspend by driving the firmware-provided ACPI methods SXIO/SXFP/SXLV
 * found under the controller's upstream bridge.
 */
static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
	acpi_handle bridge, SXIO, SXFP, SXLV;

	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
		return;
	bridge = ACPI_HANDLE(&dev->dev);
	if (!bridge)
		return;

	/*
	 * SXIO/SXFP/SXLV exist only on machines requiring this quirk.
	 * Thunderbolt bridges in external devices might share the device ID
	 * with the host's but will not have these ACPI methods, so this
	 * lookup also implicitly confirms we are at the right bridge.
	 */
	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
		return;
	dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n");

	/* Firmware-defined power-off sequence; do not reorder */
	acpi_execute_simple_method(SXIO, NULL, 1);
	acpi_execute_simple_method(SXFP, NULL, 0);
	msleep(300);
	acpi_execute_simple_method(SXLV, NULL, 0);
	acpi_execute_simple_method(SXIO, NULL, 0);
	acpi_execute_simple_method(SXLV, NULL, 0);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_poweroff_thunderbolt);
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
/*
 * Apple: wait for the Thunderbolt NHI to finish resuming before resuming
 * the tunnelled bridges below it.  There is no parent/child PM relationship
 * between the NHI and the bridges, so the dependency is expressed manually
 * with device_pm_wait_for_dev().
 */
static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
{
	struct pci_dev *sibling = NULL;
	struct pci_dev *nhi = NULL;

	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
		return;

	/*
	 * Find the NHI: it sits at devfn 0 below the sibling downstream
	 * port at slot 0 of our bus (i.e. under the same switch).
	 */
	sibling = pci_get_slot(dev->bus, 0x0);
	if (sibling == dev)
		goto out;			/* we are the NHI's own port */
	if (!sibling || !sibling->subordinate)
		goto out;
	nhi = pci_get_slot(sibling->subordinate, 0x0);
	if (!nhi)
		goto out;
	/* Confirm it really is a supported Intel Thunderbolt NHI */
	if (nhi->vendor != PCI_VENDOR_ID_INTEL
	    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
		nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
	    || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
		goto out;
	dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
	/* pci_get_slot() took references; drop them (NULL-safe) */
	pci_dev_put(nhi);
	pci_dev_put(sibling);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
3519#endif
3520
3521static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
3522 struct pci_fixup *end)
3523{
3524 ktime_t calltime;
3525
3526 for (; f < end; f++)
3527 if ((f->class == (u32) (dev->class >> f->class_shift) ||
3528 f->class == (u32) PCI_ANY_ID) &&
3529 (f->vendor == dev->vendor ||
3530 f->vendor == (u16) PCI_ANY_ID) &&
3531 (f->device == dev->device ||
3532 f->device == (u16) PCI_ANY_ID)) {
3533 calltime = fixup_debug_start(dev, f->hook);
3534 f->hook(dev);
3535 fixup_debug_report(dev, calltime, f->hook);
3536 }
3537}
3538
/*
 * Per-pass fixup tables; the boundaries are provided by the linker script
 * for the sections that DECLARE_PCI_FIXUP_* places entries into.
 */
extern struct pci_fixup __start_pci_fixups_early[];
extern struct pci_fixup __end_pci_fixups_early[];
extern struct pci_fixup __start_pci_fixups_header[];
extern struct pci_fixup __end_pci_fixups_header[];
extern struct pci_fixup __start_pci_fixups_final[];
extern struct pci_fixup __end_pci_fixups_final[];
extern struct pci_fixup __start_pci_fixups_enable[];
extern struct pci_fixup __end_pci_fixups_enable[];
extern struct pci_fixup __start_pci_fixups_resume[];
extern struct pci_fixup __end_pci_fixups_resume[];
extern struct pci_fixup __start_pci_fixups_resume_early[];
extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
extern struct pci_fixup __start_pci_fixups_suspend_late[];
extern struct pci_fixup __end_pci_fixups_suspend_late[];

/* Final-pass fixups are gated until pci_apply_final_quirks() sets this */
static bool pci_apply_fixup_final_quirks;
3557
/*
 * pci_fixup_device - apply all registered quirks of the given pass to @dev
 * @pass: which fixup phase to run (early/header/final/enable/PM passes)
 * @dev:  device to fix up
 *
 * The final pass is skipped until pci_apply_final_quirks() has run once,
 * so late-discovered (e.g. hotplugged) devices see final quirks only after
 * boot-time processing is complete.
 */
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
	struct pci_fixup *start, *end;

	switch (pass) {
	case pci_fixup_early:
		start = __start_pci_fixups_early;
		end = __end_pci_fixups_early;
		break;

	case pci_fixup_header:
		start = __start_pci_fixups_header;
		end = __end_pci_fixups_header;
		break;

	case pci_fixup_final:
		if (!pci_apply_fixup_final_quirks)
			return;
		start = __start_pci_fixups_final;
		end = __end_pci_fixups_final;
		break;

	case pci_fixup_enable:
		start = __start_pci_fixups_enable;
		end = __end_pci_fixups_enable;
		break;

	case pci_fixup_resume:
		start = __start_pci_fixups_resume;
		end = __end_pci_fixups_resume;
		break;

	case pci_fixup_resume_early:
		start = __start_pci_fixups_resume_early;
		end = __end_pci_fixups_resume_early;
		break;

	case pci_fixup_suspend:
		start = __start_pci_fixups_suspend;
		end = __end_pci_fixups_suspend;
		break;

	case pci_fixup_suspend_late:
		start = __start_pci_fixups_suspend_late;
		end = __end_pci_fixups_suspend_late;
		break;

	default:
		/* unknown pass: nothing to do */
		return;
	}
	pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
3612
3613
/*
 * Run the final-pass quirks for every device found at boot and, if the
 * architecture has not set pci_cache_line_size, derive it: use the common
 * CLS programmed across all devices, falling back to the default when the
 * devices disagree.
 */
static int __init pci_apply_final_quirks(void)
{
	struct pci_dev *dev = NULL;
	u8 cls = 0;
	u8 tmp;

	if (pci_cache_line_size)
		printk(KERN_DEBUG "PCI: CLS %u bytes\n",
		       pci_cache_line_size << 2);

	/* From now on pci_fixup_device(pci_fixup_final, ...) is live */
	pci_apply_fixup_final_quirks = true;
	for_each_pci_dev(dev) {
		pci_fixup_device(pci_fixup_final, dev);

		/*
		 * If arch hasn't set it explicitly yet, sample each device's
		 * programmed cache line size; on the first mismatch give up
		 * and use the default.
		 */
		if (!pci_cache_line_size) {
			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
			if (!cls)
				cls = tmp;
			if (!tmp || cls == tmp)
				continue;

			printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
			       cls << 2, tmp << 2,
			       pci_dfl_cache_line_size << 2);
			pci_cache_line_size = pci_dfl_cache_line_size;
		}
	}

	/* No mismatch seen: adopt the consensus value (or the default) */
	if (!pci_cache_line_size) {
		printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
		       cls << 2, pci_dfl_cache_line_size << 2);
		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
	}

	return 0;
}

/* After PCI enumeration (subsys initcalls), before most drivers bind */
fs_initcall_sync(pci_apply_final_quirks);
3656
3657
3658
3659
3660
3661
/*
 * Device-specific reset for Intel 82599 SFP virtual functions: simply
 * issue an FLR.  @probe != 0 means "report whether a reset is possible"
 * without performing it; FLR is always available here, so return 0 either
 * way (see pci_dev_specific_reset()).
 */
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
	if (!probe)
		pcie_flr(dev);
	return 0;
}
3676
/* IvyBridge IGD MMIO register offsets (BAR 0) used by reset_ivb_igd() */
#define SOUTH_CHICKEN2 0xc2004
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define MSG_CTL 0x45010
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000	/* ms, panel-power-off wait */

/*
 * Device-specific reset for IvyBridge mobile integrated graphics: force
 * the panel power sequencer off via MMIO before the device is reset.
 * @probe != 0 only checks availability.  The register writes below follow
 * a hardware sequence -- do not reorder.
 */
static int reset_ivb_igd(struct pci_dev *dev, int probe)
{
	void __iomem *mmio_base;
	unsigned long timeout;
	u32 val;

	if (probe)
		return 0;

	mmio_base = pci_iomap(dev, 0, 0);
	if (!mmio_base)
		return -ENOMEM;

	iowrite32(0x00000002, mmio_base + MSG_CTL);

	/* Force panel power off through the PCH power sequencer */
	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);

	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
	iowrite32(val, mmio_base + PCH_PP_CONTROL);

	/* Poll for the power sequencer to go idle */
	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
	do {
		val = ioread32(mmio_base + PCH_PP_STATUS);
		if ((val & 0xb0000000) == 0)
			goto reset_complete;
		msleep(10);
	} while (time_before(jiffies, timeout));
	dev_warn(&dev->dev, "timeout during reset\n");

reset_complete:
	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);

	pci_iounmap(dev, mmio_base);
	return 0;
}
3725
3726
3727
3728
/*
 * Device-specific reset for Chelsio T4-based adapters: an FLR with
 * preparation steps to keep the device quiescent and restore config
 * state afterwards.
 */
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
	u16 old_command;
	u16 msix_flags;

	/* Only T4-family parts (device ID 0x4xxx) are handled here */
	if ((dev->device & 0xf000) != 0x4000)
		return -ENOTTY;

	/* FLR is always available for these parts */
	if (probe)
		return 0;

	/*
	 * Ensure bus mastering stays enabled across the FLR so the chip's
	 * internal state machine can make progress; remember the original
	 * Command register so it can be restored at the end.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &old_command);
	pci_write_config_word(dev, PCI_COMMAND,
			      old_command | PCI_COMMAND_MASTER);

	/* Save config space so it can be restored after the FLR */
	pci_save_state(dev);

	/*
	 * If MSI-X is not already enabled, enable it with all vectors
	 * masked to keep the device from raising legacy interrupts while
	 * the reset is in flight.  NOTE(review): dev->msix_cap is used
	 * without checking it is non-zero -- presumably all 0x4xxx parts
	 * have MSI-X; confirm.
	 */
	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
				      msix_flags |
				      PCI_MSIX_FLAGS_ENABLE |
				      PCI_MSIX_FLAGS_MASKALL);

	pcie_flr(dev);

	/* Undo the preparation: restore config space and Command register */
	pci_restore_state(dev);
	pci_write_config_word(dev, PCI_COMMAND, old_command);
	return 0;
}
3789
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166

/*
 * Table of device-specific reset methods, scanned in order by
 * pci_dev_specific_reset(); the first vendor/device match wins.
 */
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
		 reset_intel_82599_sfp_virtfn },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
		reset_chelsio_generic_dev },
	{ 0 }	/* terminator */
};
3805
3806
3807
3808
3809
3810
3811int pci_dev_specific_reset(struct pci_dev *dev, int probe)
3812{
3813 const struct pci_dev_reset_methods *i;
3814
3815 for (i = pci_dev_reset_methods; i->reset; i++) {
3816 if ((i->vendor == dev->vendor ||
3817 i->vendor == (u16)PCI_ANY_ID) &&
3818 (i->device == dev->device ||
3819 i->device == (u16)PCI_ANY_ID))
3820 return i->reset(dev, probe);
3821 }
3822
3823 return -ENOTTY;
3824}
3825
/*
 * Some devices issue DMA as function 0 of their slot regardless of which
 * function initiated it; add a devfn-0 alias so IOMMU groups/mappings
 * cover those transactions.
 */
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 0)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}

/* Ricoh multifunction card readers (0xe832/0xe476) DMA as function 0 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
3839
/* As quirk_dma_func0_alias(), but for devices that DMA as function 1 */
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 1)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
}

/*
 * Marvell 88SE91xx/92xx-family AHCI controllers are known to use the
 * phantom function 1 requester ID for DMA; the TTI 0x0642 and JMicron
 * JMB388 entries and 0x1c28:0x0122 follow the same pattern.
 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(0x1c28,		/* Lite-On */
			 0x0122,		/* Plextor M6E (Marvell 88SS9183) -- confirm */
			 quirk_dma_func1_alias);
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
/*
 * Devices that DMA with a fixed devfn alias, looked up by subsystem ID;
 * driver_data encodes the aliased devfn.  Both entries are Adaptec 0x0285
 * boards aliasing to devfn (1, 0).
 */
static const struct pci_device_id fixed_dma_alias_tbl[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bb),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bc),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ 0 }
};

/* Add the table-specified fixed DMA alias for matching devices */
static void quirk_fixed_dma_alias(struct pci_dev *dev)
{
	const struct pci_device_id *id;

	id = pci_match_id(fixed_dma_alias_tbl, dev);
	if (id)
		pci_add_dma_alias(dev, id->driver_data);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
/*
 * Some PCIe-to-PCI bridges forward transactions with the bridge's own
 * requester ID instead of the secondary-bus alias; flag them so the IOMMU
 * code uses the bridge itself as the DMA alias.  Only applies when the
 * bridge is conventional PCI below a PCIe parent that is not itself a
 * PCIe-to-PCI bridge.
 */
static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
{
	if (!pci_is_root_bus(pdev->bus) &&
	    pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
	    pdev->bus->self != NULL && /* implied by pci_is_pcie() use */
	    pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
		pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
}

/* ASMedia ASM1083/1085 PCIe-to-PCI bridge */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
			 quirk_use_pcie_bridge_dma_alias);
/* 0x10e3:0x8113 -- presumably Tundra 8113 bridge; confirm */
DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
/* 0x1283 (ITE) 8892/8893 bridges */
DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);
/* Intel 0x244e bridge */
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
3949
3950
3951
3952
3953
3954
3955
/*
 * Intel MIC x200 NTB devices (0x2260/0x2264) forward DMA with several
 * fixed alternate requester IDs; register each as a DMA alias.
 */
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
3964
3965
3966
3967
3968
3969
/*
 * Cavium ThunderX2 (Broadcom Vulcan) PCIe root ports translate requester
 * IDs; flag them so DMA alias walking treats the bridge as a translation
 * root (PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT).
 */
static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
			 quirk_bridge_cavm_thrx2_pcie_root);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
			 quirk_bridge_cavm_thrx2_pcie_root);
3978
3979
3980
3981
3982
/*
 * Intersil/Techwell TW686x video capture chips report a "not defined"
 * class code; override it to multimedia-other so drivers can bind.
 */
static void quirk_tw686x_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Use "other multimedia device" class, prog-if 0x01 */
	pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
	dev_info(&pdev->dev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
/*
 * Disable Relaxed Ordering and No Snoop on the PCIe root port above
 * @pdev, working around a completion erratum triggered when the root
 * port sets those attributes on completions to the device.
 */
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
	struct pci_dev *root_port = pci_find_pcie_root_port(pdev);

	if (!root_port) {
		dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n");
		return;
	}

	dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
		 dev_name(&pdev->dev));
	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_RELAX_EN |
					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
}
4039
4040
4041
4042
4043
/*
 * Chelsio T5 adapters (device ID 0x54xx) are affected by the completion
 * erratum above; other Chelsio families are left untouched even though
 * the fixup is registered for the whole vendor.
 */
static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
{
	if ((pdev->device & 0xff00) == 0x5400)
		quirk_disable_root_port_attributes(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
			 quirk_chelsio_T5_disable_root_port_attributes);
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
/*
 * ACS quirk for AMD southbridge multifunction devices: when an AMD IOMMU
 * is present (indicated by an ACPI IVRS table), the SB is considered to
 * provide Request Redirect and Completion Redirect isolation even though
 * it exposes no ACS capability.  Returns 1 if the requested @acs_flags
 * are effectively enabled, 0 if not, negative if the quirk doesn't apply.
 */
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
#ifdef CONFIG_ACPI
	struct acpi_table_header *header = NULL;
	acpi_status status;

	/* Only applies to multifunction devices on the root bus */
	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
		return -ENODEV;

	/* AMD IOMMU present? (IVRS table describes it) */
	status = acpi_get_table("IVRS", 0, &header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* Filter out flags not applicable to multifunction endpoints */
	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);

	return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
#else
	return -ENODEV;
#endif
}
4106
/*
 * ACS quirk for Cavium ThunderX root ports (device IDs 0xa000-0xa0ff):
 * treated as providing isolation equivalent to SV/TB/RR/CR/UF/DT even
 * without an ACS capability.  Returns 1 when all requested flags are
 * covered, 0 otherwise, -ENOTTY when the device is out of range.
 */
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
	/* Strip the flags this hardware is deemed to provide */
	acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
		       PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);

	if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
		return -ENOTTY;

	return acs_flags ? 0 : 1;
}
4122
4123
4124
4125
4126
4127
4128
/*
 * Intel PCH root port device IDs handled by pci_quirk_intel_pch_acs().
 * Chipset family attributions below are from the usual ID ranges --
 * confirm against Intel datasheets before relying on them.
 */
static const u16 pci_quirk_intel_pch_acs_ids[] = {
	/* Ibex Peak PCH */
	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
	/* Cougar Point PCH */
	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
	/* Panther Point PCH */
	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
	/* Lynx Point PCH */
	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
	/* Lynx Point-LP PCH */
	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
	/* Wildcat Point-LP PCH */
	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
	/* Patsburg (X79) PCH */
	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
	/* Wellsburg (X99) PCH */
	0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
	0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
	/* Lynx Point (9 series) PCH */
	0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
4156
4157static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4158{
4159 int i;
4160
4161
4162 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4163 return false;
4164
4165 for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
4166 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4167 return true;
4168
4169 return false;
4170}
4171
/* ACS-equivalent isolation assumed for quirked Intel PCH root ports */
#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)

/*
 * Report ACS-equivalent isolation for listed Intel PCH root ports.  The
 * flags count as enabled only once PCI_DEV_FLAGS_ACS_ENABLED_QUIRK has
 * been set (by the enable-side quirk elsewhere in this file).
 */
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
		    INTEL_PCH_ACS_FLAGS : 0;

	if (!pci_quirk_intel_pch_acs_match(dev))
		return -ENOTTY;

	return acs_flags & ~flags ? 0 : 1;
}
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4196{
4197 u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
4198 int ret = acs_flags & ~flags ? 0 : 1;
4199
4200 dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret);
4201
4202 return ret;
4203}
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4238{
4239 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4240 return false;
4241
4242 switch (dev->device) {
4243 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a:
4244 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee:
4245 return true;
4246 }
4247
4248 return false;
4249}
4250
/* These PCHs keep the ACS control dword 4 bytes past its standard offset */
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)

/*
 * ACS quirk for Sunrise Point PCH root ports: the ACS capability exists
 * but the control register lives at a non-standard offset, so read it
 * from INTEL_SPT_ACS_CTRL instead of PCI_ACS_CTRL.
 */
static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return -ENOTTY;

	/* Only consider flags the capability advertises (EC always counts) */
	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	return acs_flags & ~ctrl ? 0 : 1;
}
4273
4274static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
4275{
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
4286 PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
4287
4288 return acs_flags ? 0 : 1;
4289}
4290
/*
 * Table of per-device ACS quirk handlers, scanned in order by
 * pci_dev_specific_acs_enabled().  A handler may decline with a negative
 * return, in which case later matching entries are tried -- so the two
 * Intel PCI_ANY_ID entries (PCH then SPT) deliberately coexist.
 */
static const struct pci_dev_acs_enabled {
	u16 vendor;
	u16 device;
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
	/* AMD/ATI southbridge functions */
	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
	/* Solarflare multifunction NICs */
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
	/* Intel multifunction NICs (82598/82599/X540 era IDs) */
	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },

	{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
	/* I219 */
	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
	/* QCOM QDF2xxx root ports */
	{ 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
	{ 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
	/* Intel PCH root ports: both quirks consulted, in this order */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
	/* Cavium ThunderX */
	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
	{ 0 }
};
4373
4374int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
4375{
4376 const struct pci_dev_acs_enabled *i;
4377 int ret;
4378
4379
4380
4381
4382
4383
4384
4385 for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
4386 if ((i->vendor == dev->vendor ||
4387 i->vendor == (u16)PCI_ANY_ID) &&
4388 (i->device == dev->device ||
4389 i->device == (u16)PCI_ANY_ID)) {
4390 ret = i->acs_enabled(dev, acs_flags);
4391 if (ret >= 0)
4392 return ret;
4393 }
4394 }
4395
4396 return -ENOTTY;
4397}
4398
4399
4400#define INTEL_LPC_RCBA_REG 0xf0
4401
4402#define INTEL_LPC_RCBA_MASK 0xffffc000
4403
4404#define INTEL_LPC_RCBA_ENABLE (1 << 0)
4405
4406
4407#define INTEL_BSPR_REG 0x1104
4408
4409#define INTEL_BSPR_REG_BPNPD (1 << 8)
4410
4411#define INTEL_BSPR_REG_BPPD (1 << 9)
4412
4413
4414#define INTEL_UPDCR_REG 0x1114
4415
4416#define INTEL_UPDCR_REG_MASK 0x3f
4417
4418static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
4419{
4420 u32 rcba, bspr, updcr;
4421 void __iomem *rcba_mem;
4422
4423
4424
4425
4426
4427
4428 pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
4429 INTEL_LPC_RCBA_REG, &rcba);
4430 if (!(rcba & INTEL_LPC_RCBA_ENABLE))
4431 return -EINVAL;
4432
4433 rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
4434 PAGE_ALIGN(INTEL_UPDCR_REG));
4435 if (!rcba_mem)
4436 return -ENOMEM;
4437
4438
4439
4440
4441
4442
4443
4444
4445 bspr = readl(rcba_mem + INTEL_BSPR_REG);
4446 bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
4447 if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
4448 updcr = readl(rcba_mem + INTEL_UPDCR_REG);
4449 if (updcr & INTEL_UPDCR_REG_MASK) {
4450 dev_info(&dev->dev, "Disabling UPDCR peer decodes\n");
4451 updcr &= ~INTEL_UPDCR_REG_MASK;
4452 writel(updcr, rcba_mem + INTEL_UPDCR_REG);
4453 }
4454 }
4455
4456 iounmap(rcba_mem);
4457 return 0;
4458}
4459
4460
4461#define INTEL_MPC_REG 0xd8
4462
4463#define INTEL_MPC_REG_IRBNCE (1 << 26)
4464
4465static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
4466{
4467 u32 mpc;
4468
4469
4470
4471
4472
4473
4474
4475 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
4476 if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
4477 dev_info(&dev->dev, "Enabling MPC IRBNCE\n");
4478 mpc |= INTEL_MPC_REG_IRBNCE;
4479 pci_write_config_word(dev, INTEL_MPC_REG, mpc);
4480 }
4481}
4482
4483static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
4484{
4485 if (!pci_quirk_intel_pch_acs_match(dev))
4486 return -ENOTTY;
4487
4488 if (pci_quirk_enable_intel_lpc_acs(dev)) {
4489 dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n");
4490 return 0;
4491 }
4492
4493 pci_quirk_enable_intel_rp_mpc_acs(dev);
4494
4495 dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
4496
4497 dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n");
4498
4499 return 0;
4500}
4501
4502static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
4503{
4504 int pos;
4505 u32 cap, ctrl;
4506
4507 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4508 return -ENOTTY;
4509
4510 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
4511 if (!pos)
4512 return -ENOTTY;
4513
4514 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4515 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4516
4517 ctrl |= (cap & PCI_ACS_SV);
4518 ctrl |= (cap & PCI_ACS_RR);
4519 ctrl |= (cap & PCI_ACS_CR);
4520 ctrl |= (cap & PCI_ACS_UF);
4521
4522 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4523
4524 dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n");
4525
4526 return 0;
4527}
4528
/*
 * Table of device-specific ACS *enable* quirks.  Each handler returns
 * a negative value to decline the device (keep searching) or >= 0 when
 * it has handled it; see pci_dev_specific_enable_acs() below.
 */
static const struct pci_dev_enable_acs {
	u16 vendor;	/* PCI vendor ID (PCI_ANY_ID wildcard allowed) */
	u16 device;	/* PCI device ID (PCI_ANY_ID wildcard allowed) */
	int (*enable_acs)(struct pci_dev *dev);
} pci_dev_enable_acs[] = {
	/* Both Intel entries are wildcards; each handler does its own
	 * precise device matching and returns -ENOTTY on mismatch. */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
	{ 0 }	/* terminator */
};
4538
4539int pci_dev_specific_enable_acs(struct pci_dev *dev)
4540{
4541 const struct pci_dev_enable_acs *i;
4542 int ret;
4543
4544 for (i = pci_dev_enable_acs; i->enable_acs; i++) {
4545 if ((i->vendor == dev->vendor ||
4546 i->vendor == (u16)PCI_ANY_ID) &&
4547 (i->device == dev->device ||
4548 i->device == (u16)PCI_ANY_ID)) {
4549 ret = i->enable_acs(dev);
4550 if (ret >= 0)
4551 return ret;
4552 }
4553 }
4554
4555 return -ENOTTY;
4556}
4557
4558
4559
4560
4561
4562
4563
4564
4565static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
4566{
4567 int pos, i = 0;
4568 u8 next_cap;
4569 u16 reg16, *cap;
4570 struct pci_cap_saved_state *state;
4571
4572
4573 if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
4574 return;
4575
4576
4577 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
4578 if (!pos)
4579 return;
4580
4581
4582
4583
4584
4585 pci_read_config_byte(pdev, pos + 1, &next_cap);
4586 if (next_cap)
4587 return;
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597 pos = 0x50;
4598 pci_read_config_word(pdev, pos, ®16);
4599 if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
4600 u32 status;
4601#ifndef PCI_EXP_SAVE_REGS
4602#define PCI_EXP_SAVE_REGS 7
4603#endif
4604 int size = PCI_EXP_SAVE_REGS * sizeof(u16);
4605
4606 pdev->pcie_cap = pos;
4607 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16);
4608 pdev->pcie_flags_reg = reg16;
4609 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16);
4610 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
4611
4612 pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
4613 if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
4614 PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
4615 pdev->cfg_size = PCI_CFG_SPACE_SIZE;
4616
4617 if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
4618 return;
4619
4620
4621
4622
4623 state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
4624 if (!state)
4625 return;
4626
4627 state->cap.cap_nr = PCI_CAP_ID_EXP;
4628 state->cap.cap_extended = 0;
4629 state->cap.size = size;
4630 cap = (u16 *)&state->cap.data[0];
4631 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
4632 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
4633 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
4634 pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
4635 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
4636 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
4637 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
4638 hlist_add_head(&state->next, &pdev->saved_cap_space);
4639 }
4640}
4641DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
4642
4643
4644
4645
4646
4647
4648
4649static void quirk_no_aersid(struct pci_dev *pdev)
4650{
4651
4652 if (pdev->bus->sysdata && pci_domain_nr(pdev->bus) >= 0x10000)
4653 pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
4654}
4655DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
4656DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
4657DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
4658DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
4659
4660
/*
 * Flag devices whose Function Level Reset must not be used; the PCI
 * core's reset paths skip FLR when PCI_DEV_FLAGS_NO_FLR_RESET is set.
 */
static void quirk_intel_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
4667