/*
 * sata_via.c - SCSI low-level driver for VIA Serial ATA controllers
 *
 * Author: Jeff Garzik
 *
 * This program is free software; it is distributed under the terms of
 * the GNU General Public License.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"

/*
 * vt8251 is different from the other VIA SATA controllers: it exposes two
 * channels, and each channel has both a Master and a Slave slot.
 */
enum board_ids_enum {
	vt6420,
	vt6421,
	vt8251,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* native mode enable */
	SVIA_MISC_3		= 0x46, /* Miscellaneous Control III */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA / cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */

	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
};

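/*
 * Driver-private data.  wd_workaround records whether svia_wd_fix() has
 * already been applied (always on vt6420, on demand on vt6421), so the
 * fix can be re-applied after a suspend/resume cycle.
 */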
struct svia_priv {
	bool			wd_workaround;
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);

static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= svia_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
	.error_handler		= vt6421_error_handler,
};

static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

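/*
 * On vt6420/vt6421 the SCR registers are memory-mapped behind PCI BAR 5
 * and are accessed with plain MMIO reads/writes.  vt8251 instead exposes
 * them through PCI configuration space (see vt8251_scr_read/write below).
 */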
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bit0 and bit1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit4 of the config byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bit2 and bit3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bit0 and bit1 of the config byte */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bit2 and bit3 of the config byte */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}

static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 *	svia_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Writes the ATA taskfile to the standard ATA host controller
 *	registers.
 *
 *	Some VIA chips lose the device register whenever the IEN bit in
 *	the control register is changed, so force a full taskfile load
 *	(including the device register) whenever ctl changes.
 */
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_taskfile ttf;

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/*
	 * Some VIA controllers choke if ATA_NIEN is manipulated in
	 * certain ways.  Leave it alone and just clear the pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}

/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are unreliable and may hang the whole
 *	machine if accessed with the wrong timing.  To avoid such a
 *	catastrophe, vt6420 doesn't provide generic SCR access operations.
 *
 *	The SCR sequence below is therefore only run while the driver is
 *	loading (ATA_PFLAG_LOADING); afterwards only the !BSY wait is
 *	performed, which also means hotplug detection does not work on
 *	vt6420.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}

static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if ((qc->tf.command == ATA_CMD_PACKET) &&
	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
		/* prevents corruption on some ATAPI burners */
		ata_sff_pause(ap);
	}
	ata_bmdma_start(qc);
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

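/* minimum BAR sizes expected at probe time, indexed by PCI BAR number */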
static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

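/*
 * Per-port SCR register blocks live in BAR 5: ports are spaced 128 bytes
 * apart on vt6420 and 64 bytes apart on vt6421.
 */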
static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const *iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}

static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}

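/*
 * Setting bit 2 of config register 0x52 lowers the controller's internal
 * FIFO flow-control watermark.  This works around transfer aborts seen
 * with some drives (see vt6421_error_handler() and svia_configure()), at
 * the cost of reduced throughput.
 */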
static void svia_wd_fix(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, 0x52, &tmp8);
	pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}

static irqreturn_t vt6421_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);

	/* if the IRQ was not handled, it might be a hotplug IRQ */
	if (rc != IRQ_HANDLED) {
		u32 serror;
		unsigned long flags;

		spin_lock_irqsave(&host->lock, flags);
		/* check for hotplug on port 0 */
		svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
			ata_port_freeze(host->ports[0]);
			rc = IRQ_HANDLED;
		}
		/* check for hotplug on port 1 */
		svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
			ata_port_freeze(host->ports[1]);
			rc = IRQ_HANDLED;
		}
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return rc;
}

static void vt6421_error_handler(struct ata_port *ap)
{
	struct svia_priv *hpriv = ap->host->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 serror;

	/* see svia_configure() for description */
	if (!hpriv->wd_workaround) {
		svia_scr_read(&ap->link, SCR_ERROR, &serror);
		if (serror == 0x1000500) {
			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
			svia_wd_fix(pdev);
			hpriv->wd_workaround = true;
			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
		}
	}

	ata_sff_error_handler(ap);
}

static void svia_configure(struct pci_dev *pdev, int board_id,
			   struct svia_priv *hpriv)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int)tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	/* enable IRQ on hotplug */
	pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
	if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
		dev_dbg(&pdev->dev,
			"enabling SATA hotplug (0x%x)\n",
			(int)tmp8);
		tmp8 |= SATA_HOTPLUG;
		pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
	}

	/*
	 * vt6420/1 has problems talking to certain (notably Western
	 * Digital) drives: when the controller asserts FIFO flow control,
	 * these drives send more data than the default FIFO watermark
	 * leaves room for, and the controller aborts the transfer with
	 * R_ERR.
	 *
	 * Setting bit 2 of config register 0x52 (svia_wd_fix()) lowers
	 * the FIFO flow-control watermark so flow control is asserted
	 * earlier.  That avoids the overflow but also limits throughput
	 * to roughly 60 MB/s, so on vt6421 the fix is applied only once
	 * the error has actually been seen (see vt6421_error_handler()).
	 * On vt6420 it is applied unconditionally, since SCR_ERROR cannot
	 * be read safely on that chip.
	 */
	if (board_id == vt6420) {
		svia_wd_fix(pdev);
		hpriv->wd_workaround = true;
	}
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;
	struct svia_priv *hpriv;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_err(&pdev->dev,
				"invalid PCI BAR %u (start 0x%llx, len 0x%llx)\n",
				i,
				(unsigned long long)pci_resource_start(pdev, i),
				(unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	svia_configure(pdev, board_id, hpriv);

	pci_set_master(pdev);
	if (board_id == vt6421)
		return ata_host_activate(host, pdev->irq, vt6421_interrupt,
					 IRQF_SHARED, &svia_sht);
	else
		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
					 IRQF_SHARED, &svia_sht);
}

#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct svia_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* reapply the FIFO watermark fix if it was in effect before suspend */
	if (hpriv->wd_workaround)
		svia_wd_fix(pdev);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(svia_pci_driver);