/*
 * Generic setup and probe helpers for PCI IDE controllers: PCI enable and
 * configuration, BAR and IRQ discovery, bus-master DMA setup and host
 * registration.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>

/**
 * ide_setup_pci_baseregs - place a PCI IDE controller into native mode
 * @dev: PCI device to switch
 * @name: name of the interface
 *
 * Attempt to place both channels of the controller into PCI native mode by
 * setting the programming-interface bits in PCI_CLASS_PROG.  Returns 0 on
 * success or -EOPNOTSUPP if the device cannot be switched or the update
 * did not stick.
 */
static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
{
        u8 progif = 0;

        /*
         * Place both IDE interfaces into PCI "native" mode:
         */
        if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
            (progif & 5) != 5) {
                if ((progif & 0xa) != 0xa) {
                        printk(KERN_INFO "%s %s: device not capable of full "
                                "native PCI mode\n", name, pci_name(dev));
                        return -EOPNOTSUPP;
                }
                printk(KERN_INFO "%s %s: placing both ports into native PCI "
                        "mode\n", name, pci_name(dev));
                (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif | 5);
                if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
                    (progif & 5) != 5) {
                        printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
                                "wanted 0x%04x, got 0x%04x\n",
                                name, pci_name(dev), progif | 5, progif);
                        return -EOPNOTSUPP;
                }
        }
        return 0;
}

#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
/*
 * Clear the simplex status bit in the bus-master DMA status register,
 * preserving only the drive DMA-capable bits, then report whether the
 * simplex bit is still set afterwards.
 */
static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
{
        u8 dma_stat = inb(dma_base + 2);

        outb(dma_stat & 0x60, dma_base + 2);
        dma_stat = inb(dma_base + 2);

        return (dma_stat & 0x80) ? 1 : 0;
}

/**
 * ide_pci_dma_base - fetch the bus-master DMA base address
 * @hwif: IDE interface
 * @d: IDE port info
 *
 * Fetch the DMA bus-master I/O base address from PCI space (BAR 4, or
 * BAR 2 for CS5520-class controllers), or derive it from the mate
 * interface if that one has already been set up.  The secondary channel
 * uses the upper half of the 16-byte register block.
 */
unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long dma_base = 0;

        if (hwif->host_flags & IDE_HFLAG_MMIO)
                return hwif->dma_base;

        if (hwif->mate && hwif->mate->dma_base) {
                dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
        } else {
                u8 baridx = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;

                dma_base = pci_resource_start(dev, baridx);

                if (dma_base == 0) {
                        printk(KERN_ERR "%s %s: DMA base is invalid\n",
                                d->name, pci_name(dev));
                        return 0;
                }
        }

        if (hwif->channel)
                dma_base += 8;

        return dma_base;
}
EXPORT_SYMBOL_GPL(ide_pci_dma_base);
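
/*
 * For reference, a minimal sketch of the register layout these offsets
 * assume (taken from the standard SFF-8038i bus-master IDE programming
 * interface, not from this file):
 *
 *      base + 0x00     primary channel bus-master command register
 *      base + 0x02     primary channel bus-master status register
 *      base + 0x04     primary channel PRD table pointer (32 bit)
 *      base + 0x08     secondary channel bus-master command register
 *      base + 0x0a     secondary channel bus-master status register
 *      base + 0x0c     secondary channel PRD table pointer (32 bit)
 *
 * This is why the secondary channel adds 8 to the base above and why
 * ide_pci_clear_simplex() reads the status register at offset 2.
 */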

int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        u8 dma_stat;

        if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
                goto out;

        if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
                if (ide_pci_clear_simplex(hwif->dma_base, d->name))
                        printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
                                d->name, pci_name(dev));
                goto out;
        }

        /*
         * If the device claims to be "simplex", only one of the two
         * interfaces can use DMA at any point in time.  If the simplex bit
         * is set and the mate interface already owns a DMA base, disable
         * DMA on this one.
         */
        dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
        if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
                printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
                        d->name, pci_name(dev));
                return -1;
        }
out:
        return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_check_simplex);

/*
 * Set up bus-master capability in PCI_COMMAND if the BIOS has not already
 * done so, and verify that the update actually took effect.
 */
int ide_pci_set_master(struct pci_dev *dev, const char *name)
{
        u16 pcicmd;

        pci_read_config_word(dev, PCI_COMMAND, &pcicmd);

        if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
                pci_set_master(dev);

                if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
                    (pcicmd & PCI_COMMAND_MASTER) == 0) {
                        printk(KERN_ERR "%s %s: error updating PCICMD\n",
                                name, pci_name(dev));
                        return -EIO;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_set_master);
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */

void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
{
        printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
                d->name, pci_name(dev),
                dev->vendor, dev->device, dev->revision);
}
EXPORT_SYMBOL_GPL(ide_setup_pci_noise);

/**
 * ide_pci_enable - do PCI enables
 * @dev: PCI device
 * @d: IDE port info
 *
 * Enable the IDE PCI device.  We attempt to enable the device fully, but
 * fall back to an I/O-only enable if that fails (reporting that the BIOS
 * configuration was fixed up).  A 32-bit DMA mask is set and the BARs the
 * driver needs are reserved.
 *
 * Returns zero on success or an error code.
 */
static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
{
        int ret, bars;

        if (pci_enable_device(dev)) {
                ret = pci_enable_device_io(dev);
                if (ret < 0) {
                        printk(KERN_WARNING "%s %s: couldn't enable device\n",
                                d->name, pci_name(dev));
                        goto out;
                }
                printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
                        d->name, pci_name(dev));
        }

        /*
         * Assume all devices can do 32-bit DMA for now; a per-port DMA mask
         * could be added to struct ide_port_info if that ever changes.
         */
        ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
        if (ret < 0) {
                printk(KERN_ERR "%s %s: can't set DMA mask\n",
                        d->name, pci_name(dev));
                goto out;
        }

        /* BARs 0-1 (and 2-3 for a second port), plus the DMA BAR if used */
        if (d->host_flags & IDE_HFLAG_SINGLE)
                bars = (1 << 2) - 1;
        else
                bars = (1 << 4) - 1;

        if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
                if (d->host_flags & IDE_HFLAG_CS5520)
                        bars |= (1 << 2);
                else
                        bars |= (1 << 4);
        }

        ret = pci_request_selected_regions(dev, bars, d->name);
        if (ret < 0)
                printk(KERN_ERR "%s %s: can't reserve resources\n",
                        d->name, pci_name(dev));
out:
        return ret;
}

/**
 * ide_pci_configure - configure an unconfigured device
 * @dev: PCI device
 * @d: IDE port info
 *
 * Enable and configure the PCI device we have been passed.
 * Returns zero on success or an error code.
 */
static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
{
        u16 pcicmd = 0;

        /*
         * The PnP BIOS was supposed to have set this device up, but we can
         * do it ourselves: switch it into native mode and enable I/O
         * decoding, then verify that the enable actually stuck.
         */
        if (ide_setup_pci_baseregs(dev, d->name) ||
            pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
                printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
                        d->name, pci_name(dev));
                return -ENODEV;
        }
        if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
                printk(KERN_ERR "%s %s: error accessing PCI regs\n",
                        d->name, pci_name(dev));
                return -EIO;
        }
        if (!(pcicmd & PCI_COMMAND_IO)) {
                printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
                        d->name, pci_name(dev));
                return -ENXIO;
        }
        return 0;
}

/**
 * ide_pci_check_iomem - check that a BAR is I/O space
 * @dev: PCI device
 * @d: IDE port info
 * @bar: BAR number
 *
 * Returns 0 if the BAR is unconfigured or decodes I/O space, or -EINVAL
 * if it was assigned memory space and therefore cannot be used here.
 */
static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
                               int bar)
{
        ulong flags = pci_resource_flags(dev, bar);

        /* Unconfigured? */
        if (!flags || pci_resource_len(dev, bar) == 0)
                return 0;

        /* I/O space */
        if (flags & IORESOURCE_IO)
                return 0;

        /* Bad: memory space */
        return -EINVAL;
}

/**
 * ide_hw_configure - configure a struct ide_hw instance
 * @dev: PCI device holding the interface
 * @d: IDE port info
 * @port: port number
 * @hw: struct ide_hw instance corresponding to this port
 *
 * Perform the initial set up for the hardware interface structure.  This
 * is done per interface port rather than per PCI device; there may be
 * more than one port per device.
 *
 * Returns zero on success or an error code.
 */
static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
                            unsigned int port, struct ide_hw *hw)
{
        unsigned long ctl = 0, base = 0;

        if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
                if (ide_pci_check_iomem(dev, d, 2 * port) ||
                    ide_pci_check_iomem(dev, d, 2 * port + 1)) {
                        printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
                                "reported as MEM for port %d!\n",
                                d->name, pci_name(dev), port);
                        return -EINVAL;
                }

                ctl  = pci_resource_start(dev, 2 * port + 1);
                base = pci_resource_start(dev, 2 * port);
        } else {
                /* Use the default legacy ISA addresses */
                ctl = port ? 0x374 : 0x3f4;
                base = port ? 0x170 : 0x1f0;
        }

        if (!base || !ctl) {
                printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
                        d->name, pci_name(dev), port);
                return -EINVAL;
        }

        memset(hw, 0, sizeof(*hw));
        hw->dev = &dev->dev;
        ide_std_init_ports(hw, base, ctl | 2);

        return 0;
}

#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
/**
 * ide_hwif_setup_dma - configure the DMA interface
 * @hwif: IDE interface
 * @d: IDE port info
 *
 * Set up the DMA base for the interface, install the default SFF DMA ops
 * if the host driver did not supply its own, handle simplex controllers,
 * enable PCI bus mastering and allocate the DMA engine.
 *
 * Returns zero on success or -1 on failure.
 */
int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);

        if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
            ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
             (dev->class & 0x80))) {
                unsigned long base = ide_pci_dma_base(hwif, d);

                if (base == 0)
                        return -1;

                hwif->dma_base = base;

                if (hwif->dma_ops == NULL)
                        hwif->dma_ops = &sff_dma_ops;

                if (ide_pci_check_simplex(hwif, d) < 0)
                        return -1;

                if (ide_pci_set_master(dev, d->name) < 0)
                        return -1;

                if (hwif->host_flags & IDE_HFLAG_MMIO)
                        printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
                else
                        printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
                                hwif->name, base, base + 7);

                hwif->extra_base = base + (hwif->channel ? 8 : 16);

                if (ide_allocate_dma_engine(hwif))
                        return -1;
        }

        return 0;
}
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */

/**
 * ide_setup_pci_controller - set up an IDE PCI controller
 * @dev: PCI device
 * @d: IDE port info
 * @noisy: verbose flag
 *
 * Set up the PCI and controller side of the IDE interface.  This brings
 * up the PCI side of the device, checks that the device is enabled and
 * enables it if need be.
 */
static int ide_setup_pci_controller(struct pci_dev *dev,
                                    const struct ide_port_info *d, int noisy)
{
        int ret;
        u16 pcicmd;

        if (noisy)
                ide_setup_pci_noise(dev, d);

        ret = ide_pci_enable(dev, d);
        if (ret < 0)
                goto out;

        ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
        if (ret < 0) {
                printk(KERN_ERR "%s %s: error accessing PCI regs\n",
                        d->name, pci_name(dev));
                goto out;
        }
        if (!(pcicmd & PCI_COMMAND_IO)) {       /* is device disabled? */
                ret = ide_pci_configure(dev, d);
                if (ret < 0)
                        goto out;
                printk(KERN_INFO "%s %s: device enabled (Linux)\n",
                        d->name, pci_name(dev));
        }

out:
        return ret;
}

/**
 * ide_pci_setup_ports - configure the ports on a PCI IDE device
 * @dev: PCI device
 * @d: IDE port info
 * @hw: struct ide_hw instances corresponding to this PCI IDE device
 * @hws: struct ide_hw pointer table to update
 *
 * Scan the interfaces attached to this device and do any necessary per
 * port setup, skipping ports that the chipset's enable bits report as
 * disabled.  Normally called from the generic PCI IDE probe path, but
 * also used directly by host drivers whose chipset setup is not the
 * default PCI IDE one.
 */
void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
                         struct ide_hw *hw, struct ide_hw **hws)
{
        int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
        u8 tmp;

        /*
         * Set up the IDE ports
         */
        for (port = 0; port < channels; ++port) {
                const struct ide_pci_enablebit *e = &d->enablebits[port];

                if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
                    (tmp & e->mask) != e->val)) {
                        printk(KERN_INFO "%s %s: IDE port disabled\n",
                                d->name, pci_name(dev));
                        continue;       /* port not enabled */
                }

                if (ide_hw_configure(dev, d, port, hw + port))
                        continue;

                *(hws + port) = hw + port;
        }
}
EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
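
/*
 * Usage sketch (an assumption about callers, not part of this file): a host
 * driver that cannot use the default ide_pci_init_one() path may call this
 * helper directly and register the host itself, along the lines of
 *
 *      struct ide_hw hw[2], *hws[] = { NULL, NULL };
 *
 *      ide_pci_setup_ports(dev, &my_port_info, &hw[0], &hws[0]);
 *      hw[0].irq = 14;
 *      hw[1].irq = 15;
 *      rc = ide_host_add(&my_port_info, hws, 2, NULL);
 *
 * where my_port_info is a hypothetical struct ide_port_info supplied by
 * that host driver.
 */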

/*
 * do_ide_setup_pci_device() looks at a PCI IDE device, runs the chipset
 * specific init_chipset hook (if any) and works out which IRQ the ports
 * should use.  In compatibility mode the legacy IRQs are probed later, so
 * 0 is returned; otherwise the device's PCI IRQ is returned.  Where the
 * location of the primary/secondary enable bits is known it is described
 * in struct ide_port_info; for all other chipsets both interfaces are
 * assumed to be enabled.
 */
static int do_ide_setup_pci_device(struct pci_dev *dev,
                                   const struct ide_port_info *d,
                                   u8 noisy)
{
        int pciirq, ret;

        /*
         * Can we trust the reported IRQ?
         */
        pciirq = dev->irq;

        /*
         * Run the chipset specific initialisation (if any) before looking
         * at the IRQ; offboard cards may need it to set themselves up first.
         */
        ret = d->init_chipset ? d->init_chipset(dev) : 0;
        if (ret < 0)
                goto out;

        if (ide_pci_is_in_compatibility_mode(dev)) {
                if (noisy)
                        printk(KERN_INFO "%s %s: not 100%% native mode: will "
                                "probe irqs later\n", d->name, pci_name(dev));
                pciirq = 0;
        } else if (!pciirq && noisy) {
                printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
                        d->name, pci_name(dev), pciirq);
        } else if (noisy) {
                printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
                        d->name, pci_name(dev), pciirq);
        }

        ret = pciirq;
out:
        return ret;
}
537
538int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
539 const struct ide_port_info *d, void *priv)
540{
541 struct pci_dev *pdev[] = { dev1, dev2 };
542 struct ide_host *host;
543 int ret, i, n_ports = dev2 ? 4 : 2;
544 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
545
546 for (i = 0; i < n_ports / 2; i++) {
547 ret = ide_setup_pci_controller(pdev[i], d, !i);
548 if (ret < 0)
549 goto out;
550
551 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
552 }
553
554 host = ide_host_alloc(d, hws, n_ports);
555 if (host == NULL) {
556 ret = -ENOMEM;
557 goto out;
558 }
559
560 host->dev[0] = &dev1->dev;
561 if (dev2)
562 host->dev[1] = &dev2->dev;
563
564 host->host_priv = priv;
565 host->irq_flags = IRQF_SHARED;
566
567 pci_set_drvdata(pdev[0], host);
568 if (dev2)
569 pci_set_drvdata(pdev[1], host);
570
571 for (i = 0; i < n_ports / 2; i++) {
572 ret = do_ide_setup_pci_device(pdev[i], d, !i);
573
574
575
576
577
578 if (ret < 0)
579 goto out;
580
581
582 if (ide_pci_is_in_compatibility_mode(pdev[i])) {
583 hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0);
584 hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1);
585 } else
586 hw[i*2 + 1].irq = hw[i*2].irq = ret;
587 }
588
589 ret = ide_host_register(host, d, hws);
590 if (ret)
591 ide_host_free(host);
592out:
593 return ret;
594}
595EXPORT_SYMBOL_GPL(ide_pci_init_two);

int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
                     void *priv)
{
        return ide_pci_init_two(dev, NULL, d, priv);
}
EXPORT_SYMBOL_GPL(ide_pci_init_one);
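
/*
 * Usage sketch (hypothetical driver names, not from this file): a typical
 * PCI IDE host driver wires ide_pci_init_one() into its probe routine as
 *
 *      static int example_init_one(struct pci_dev *dev,
 *                                   const struct pci_device_id *id)
 *      {
 *              return ide_pci_init_one(dev, &example_chipset_info, NULL);
 *      }
 *
 * with example_chipset_info being that driver's struct ide_port_info.
 */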

void ide_pci_remove(struct pci_dev *dev)
{
        struct ide_host *host = pci_get_drvdata(dev);
        struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
        int bars;

        if (host->host_flags & IDE_HFLAG_SINGLE)
                bars = (1 << 2) - 1;
        else
                bars = (1 << 4) - 1;

        if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
                if (host->host_flags & IDE_HFLAG_CS5520)
                        bars |= (1 << 2);
                else
                        bars |= (1 << 4);
        }

        ide_host_remove(host);

        if (dev2)
                pci_release_selected_regions(dev2, bars);
        pci_release_selected_regions(dev, bars);

        if (dev2)
                pci_disable_device(dev2);
        pci_disable_device(dev);
}
EXPORT_SYMBOL_GPL(ide_pci_remove);

#ifdef CONFIG_PM
int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
        pci_save_state(dev);
        pci_disable_device(dev);
        pci_set_power_state(dev, pci_choose_state(dev, state));

        return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_suspend);

int ide_pci_resume(struct pci_dev *dev)
{
        struct ide_host *host = pci_get_drvdata(dev);
        int rc;

        pci_set_power_state(dev, PCI_D0);

        rc = pci_enable_device(dev);
        if (rc)
                return rc;

        pci_restore_state(dev);
        pci_set_master(dev);

        if (host->init_chipset)
                host->init_chipset(dev);

        return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_resume);
#endif
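
/*
 * Usage sketch (hypothetical names, a minimal sketch only): the probe,
 * remove and power-management helpers above are typically plugged straight
 * into a host driver's struct pci_driver, for example
 *
 *      static struct pci_driver example_pci_driver = {
 *              .name           = "EXAMPLE_IDE",
 *              .id_table       = example_pci_tbl,
 *              .probe          = example_init_one,
 *              .remove         = ide_pci_remove,
 *              .suspend        = ide_pci_suspend,
 *              .resume         = ide_pci_resume,
 *      };
 */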