1
2
3
4
5
6
7
8
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/export.h>
12#include <linux/pci.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/ide.h>
16#include <linux/dma-mapping.h>
17
18#include <asm/io.h>
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
34{
35 u8 progif = 0;
36
37
38
39
40 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
41 (progif & 5) != 5) {
42 if ((progif & 0xa) != 0xa) {
43 printk(KERN_INFO "%s %s: device not capable of full "
44 "native PCI mode\n", name, pci_name(dev));
45 return -EOPNOTSUPP;
46 }
47 printk(KERN_INFO "%s %s: placing both ports into native PCI "
48 "mode\n", name, pci_name(dev));
49 (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
50 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
51 (progif & 5) != 5) {
52 printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
53 "wanted 0x%04x, got 0x%04x\n",
54 name, pci_name(dev), progif | 5, progif);
55 return -EOPNOTSUPP;
56 }
57 }
58 return 0;
59}
60
61#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
62static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
63{
64 u8 dma_stat = inb(dma_base + 2);
65
66 outb(dma_stat & 0x60, dma_base + 2);
67 dma_stat = inb(dma_base + 2);
68
69 return (dma_stat & 0x80) ? 1 : 0;
70}
71
72
73
74
75
76
77
78
79
80unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
81{
82 struct pci_dev *dev = to_pci_dev(hwif->dev);
83 unsigned long dma_base = 0;
84
85 if (hwif->host_flags & IDE_HFLAG_MMIO)
86 return hwif->dma_base;
87
88 if (hwif->mate && hwif->mate->dma_base) {
89 dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
90 } else {
91 u8 baridx = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;
92
93 dma_base = pci_resource_start(dev, baridx);
94
95 if (dma_base == 0) {
96 printk(KERN_ERR "%s %s: DMA base is invalid\n",
97 d->name, pci_name(dev));
98 return 0;
99 }
100 }
101
102 if (hwif->channel)
103 dma_base += 8;
104
105 return dma_base;
106}
107EXPORT_SYMBOL_GPL(ide_pci_dma_base);
108
109int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
110{
111 struct pci_dev *dev = to_pci_dev(hwif->dev);
112 u8 dma_stat;
113
114 if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
115 goto out;
116
117 if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
118 if (ide_pci_clear_simplex(hwif->dma_base, d->name))
119 printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
120 d->name, pci_name(dev));
121 goto out;
122 }
123
124
125
126
127
128
129
130
131
132
133
134 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
135 if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
136 printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
137 d->name, pci_name(dev));
138 return -1;
139 }
140out:
141 return 0;
142}
143EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
144
145
146
147
148int ide_pci_set_master(struct pci_dev *dev, const char *name)
149{
150 u16 pcicmd;
151
152 pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
153
154 if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
155 pci_set_master(dev);
156
157 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
158 (pcicmd & PCI_COMMAND_MASTER) == 0) {
159 printk(KERN_ERR "%s %s: error updating PCICMD\n",
160 name, pci_name(dev));
161 return -EIO;
162 }
163 }
164
165 return 0;
166}
167EXPORT_SYMBOL_GPL(ide_pci_set_master);
168#endif
169
/**
 * ide_setup_pci_noise - announce an IDE controller
 * @dev:	PCI device being probed
 * @d:		IDE port info carrying the driver name
 *
 * Print a one-line banner identifying the controller by PCI vendor id,
 * device id and revision.
 */
void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
{
	printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
		d->name, pci_name(dev),
		dev->vendor, dev->device, dev->revision);
}
EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193static int ide_pci_enable(struct pci_dev *dev, int bars,
194 const struct ide_port_info *d)
195{
196 int ret;
197
198 if (pci_enable_device(dev)) {
199 ret = pci_enable_device_io(dev);
200 if (ret < 0) {
201 printk(KERN_WARNING "%s %s: couldn't enable device\n",
202 d->name, pci_name(dev));
203 goto out;
204 }
205 printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
206 d->name, pci_name(dev));
207 }
208
209
210
211
212
213
214 ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
215 if (ret < 0) {
216 printk(KERN_ERR "%s %s: can't set DMA mask\n",
217 d->name, pci_name(dev));
218 goto out;
219 }
220
221 ret = pci_request_selected_regions(dev, bars, d->name);
222 if (ret < 0)
223 printk(KERN_ERR "%s %s: can't reserve resources\n",
224 d->name, pci_name(dev));
225out:
226 return ret;
227}
228
229
230
231
232
233
234
235
236
237
238static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
239{
240 u16 pcicmd = 0;
241
242
243
244
245
246
247
248 if (ide_setup_pci_baseregs(dev, d->name) ||
249 pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
250 printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
251 d->name, pci_name(dev));
252 return -ENODEV;
253 }
254 if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
255 printk(KERN_ERR "%s %s: error accessing PCI regs\n",
256 d->name, pci_name(dev));
257 return -EIO;
258 }
259 if (!(pcicmd & PCI_COMMAND_IO)) {
260 printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
261 d->name, pci_name(dev));
262 return -ENXIO;
263 }
264 return 0;
265}
266
267
268
269
270
271
272
273
274
275
276
277static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
278 int bar)
279{
280 ulong flags = pci_resource_flags(dev, bar);
281
282
283 if (!flags || pci_resource_len(dev, bar) == 0)
284 return 0;
285
286
287 if (flags & IORESOURCE_IO)
288 return 0;
289
290
291 return -EINVAL;
292}
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
309 unsigned int port, struct ide_hw *hw)
310{
311 unsigned long ctl = 0, base = 0;
312
313 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
314 if (ide_pci_check_iomem(dev, d, 2 * port) ||
315 ide_pci_check_iomem(dev, d, 2 * port + 1)) {
316 printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
317 "reported as MEM for port %d!\n",
318 d->name, pci_name(dev), port);
319 return -EINVAL;
320 }
321
322 ctl = pci_resource_start(dev, 2*port+1);
323 base = pci_resource_start(dev, 2*port);
324 } else {
325
326 ctl = port ? 0x374 : 0x3f4;
327 base = port ? 0x170 : 0x1f0;
328 }
329
330 if (!base || !ctl) {
331 printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
332 d->name, pci_name(dev), port);
333 return -EINVAL;
334 }
335
336 memset(hw, 0, sizeof(*hw));
337 hw->dev = &dev->dev;
338 ide_std_init_ports(hw, base, ctl | 2);
339
340 return 0;
341}
342
343#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
344
345
346
347
348
349
350
351
352
353
/**
 * ide_hwif_setup_dma - configure Bus Master DMA for an interface
 * @hwif:	IDE interface to set up
 * @d:		port information for the host driver
 *
 * DMA is configured when the driver permits autodma, or when the
 * device is PCI IDE class and its class byte advertises bus-master
 * capability (bit 7 of the low class byte).  The steps are ordered:
 * find the DMA base, install default DMA ops, run the simplex check,
 * enable bus mastering, then allocate the DMA engine.
 *
 * Returns 0 on success (or when DMA setup was skipped), -1 on failure.
 */
int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
	    ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
	     (dev->class & 0x80))) {
		unsigned long base = ide_pci_dma_base(hwif, d);

		if (base == 0)
			return -1;

		hwif->dma_base = base;

		/* No driver-specific DMA ops: fall back to generic SFF ops. */
		if (hwif->dma_ops == NULL)
			hwif->dma_ops = &sff_dma_ops;

		if (ide_pci_check_simplex(hwif, d) < 0)
			return -1;

		if (ide_pci_set_master(dev, d->name) < 0)
			return -1;

		if (hwif->host_flags & IDE_HFLAG_MMIO)
			printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
		else
			printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
				hwif->name, base, base + 7);

		/* NOTE(review): extra_base is placed 8 or 16 bytes past the
		 * channel's DMA base depending on channel — confirm this
		 * offset against the chipset drivers that consume it. */
		hwif->extra_base = base + (hwif->channel ? 8 : 16);

		if (ide_allocate_dma_engine(hwif))
			return -1;
	}

	return 0;
}
391#endif
392
393
394
395
396
397
398
399
400
401
402
403
404
/**
 * ide_setup_pci_controller - enable and claim an IDE PCI device
 * @dev:	PCI device
 * @bars:	bitmask of BARs to request
 * @d:		IDE port info
 * @noisy:	print the controller banner when non-zero
 *
 * Enable the device and request its regions; if the BIOS left the
 * controller with I/O decode off, attempt to enable it ourselves via
 * ide_pci_configure().  On failure after the regions were requested,
 * they are released again.
 *
 * Returns 0 on success or a negative errno.
 */
static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
				    const struct ide_port_info *d, int noisy)
{
	int ret;
	u16 pcicmd;

	if (noisy)
		ide_setup_pci_noise(dev, d);

	ret = ide_pci_enable(dev, bars, d);
	if (ret < 0)
		goto out;

	ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
	/* NOTE(review): pci_read_config_word() returns 0 or a positive
	 * PCIBIOS_* error code, so this "< 0" test appears unreachable —
	 * confirm whether a non-zero check was intended. */
	if (ret < 0) {
		printk(KERN_ERR "%s %s: error accessing PCI regs\n",
			d->name, pci_name(dev));
		goto out_free_bars;
	}
	if (!(pcicmd & PCI_COMMAND_IO)) {
		/* I/O decode is off: the BIOS left the device disabled. */
		ret = ide_pci_configure(dev, d);
		if (ret < 0)
			goto out_free_bars;
		printk(KERN_INFO "%s %s: device enabled (Linux)\n",
			d->name, pci_name(dev));
	}

	goto out;

out_free_bars:
	pci_release_selected_regions(dev, bars);
out:
	return ret;
}
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
457 struct ide_hw *hw, struct ide_hw **hws)
458{
459 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
460 u8 tmp;
461
462
463
464
465
466 for (port = 0; port < channels; ++port) {
467 const struct ide_pci_enablebit *e = &d->enablebits[port];
468
469 if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
470 (tmp & e->mask) != e->val)) {
471 printk(KERN_INFO "%s %s: IDE port disabled\n",
472 d->name, pci_name(dev));
473 continue;
474 }
475
476 if (ide_hw_configure(dev, d, port, hw + port))
477 continue;
478
479 *(hws + port) = hw + port;
480 }
481}
482EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
483
484
485
486
487
488
489
490
491
492
493
/**
 * do_ide_setup_pci_device - run chipset init and determine the IRQ
 * @dev:	PCI device
 * @d:		IDE port info
 * @noisy:	print progress messages when non-zero
 *
 * Call the driver's init_chipset hook (if any), then decide which IRQ
 * the host will use: 0 for compatibility-mode controllers (the legacy
 * IRQs are picked up later by the caller), else the device's IRQ.
 *
 * Returns the IRQ number (possibly 0, meaning "probe later") or a
 * negative errno propagated from init_chipset.
 */
static int do_ide_setup_pci_device(struct pci_dev *dev,
				   const struct ide_port_info *d,
				   u8 noisy)
{
	int pciirq, ret;

	pciirq = dev->irq;

	/* Let the chipset-specific hook run first; it may veto setup. */
	ret = d->init_chipset ? d->init_chipset(dev) : 0;
	if (ret < 0)
		goto out;

	if (ide_pci_is_in_compatibility_mode(dev)) {
		if (noisy)
			printk(KERN_INFO "%s %s: not 100%% native mode: will "
				"probe irqs later\n", d->name, pci_name(dev));
		/* Signal the caller to use the legacy IDE IRQs instead. */
		pciirq = 0;
	} else if (!pciirq && noisy) {
		printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
			d->name, pci_name(dev), pciirq);
	} else if (noisy) {
		printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
			d->name, pci_name(dev), pciirq);
	}

	ret = pciirq;
out:
	return ret;
}
532
/**
 * ide_pci_init_two - set up one or two PCI IDE controllers as one host
 * @dev1:	first PCI device
 * @dev2:	second PCI device, or NULL for a single-device host
 * @d:		IDE port info shared by both devices
 * @priv:	private data stored in the host
 *
 * Enable and claim resources for each device, probe their ports,
 * allocate an IDE host spanning all of them, run per-device chipset
 * setup, assign IRQs and register the host.  On any failure every
 * requested BAR is released before returning.
 *
 * Returns 0 on success or a negative errno.
 */
int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
		     const struct ide_port_info *d, void *priv)
{
	struct pci_dev *pdev[] = { dev1, dev2 };
	struct ide_host *host;
	int ret, i, n_ports = dev2 ? 4 : 2, bars;
	struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };

	/* BARs 0-1 (single port) or 0-3 hold the command/control blocks. */
	if (d->host_flags & IDE_HFLAG_SINGLE)
		bars = (1 << 2) - 1;
	else
		bars = (1 << 4) - 1;

	if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
		/* CS5520-style chips keep the bus-master regs in BAR 2. */
		if (d->host_flags & IDE_HFLAG_CS5520)
			bars |= (1 << 2);
		else
			bars |= (1 << 4);
	}

	for (i = 0; i < n_ports / 2; i++) {
		/* Only the first device prints the controller banner. */
		ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
		if (ret < 0) {
			/* The failing device released its own regions in
			 * ide_setup_pci_controller(); clean up the first
			 * device if it was already set up. */
			if (i == 1)
				pci_release_selected_regions(pdev[0], bars);
			goto out;
		}

		ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
	}

	host = ide_host_alloc(d, hws, n_ports);
	if (host == NULL) {
		ret = -ENOMEM;
		goto out_free_bars;
	}

	host->dev[0] = &dev1->dev;
	if (dev2)
		host->dev[1] = &dev2->dev;

	host->host_priv = priv;
	host->irq_flags = IRQF_SHARED;

	pci_set_drvdata(pdev[0], host);
	if (dev2)
		pci_set_drvdata(pdev[1], host);

	for (i = 0; i < n_ports / 2; i++) {
		ret = do_ide_setup_pci_device(pdev[i], d, !i);

		if (ret < 0)
			goto out_free_bars;

		/* Compatibility-mode devices use the legacy IDE IRQs;
		 * native-mode devices use the IRQ returned above. */
		if (ide_pci_is_in_compatibility_mode(pdev[i])) {
			hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0);
			hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1);
		} else
			hw[i*2 + 1].irq = hw[i*2].irq = ret;
	}

	ret = ide_host_register(host, d, hws);
	if (ret)
		ide_host_free(host);
	else
		goto out;

out_free_bars:
	i = n_ports / 2;
	while (i--)
		pci_release_selected_regions(pdev[i], bars);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ide_pci_init_two);
613
/**
 * ide_pci_init_one - set up a single PCI IDE controller
 * @dev:	PCI device
 * @d:		IDE port info
 * @priv:	private data stored in the host
 *
 * Convenience wrapper around ide_pci_init_two() for the common
 * one-device case.  Returns 0 on success or a negative errno.
 */
int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
		     void *priv)
{
	return ide_pci_init_two(dev, NULL, d, priv);
}
EXPORT_SYMBOL_GPL(ide_pci_init_one);
620
621void ide_pci_remove(struct pci_dev *dev)
622{
623 struct ide_host *host = pci_get_drvdata(dev);
624 struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
625 int bars;
626
627 if (host->host_flags & IDE_HFLAG_SINGLE)
628 bars = (1 << 2) - 1;
629 else
630 bars = (1 << 4) - 1;
631
632 if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
633 if (host->host_flags & IDE_HFLAG_CS5520)
634 bars |= (1 << 2);
635 else
636 bars |= (1 << 4);
637 }
638
639 ide_host_remove(host);
640
641 if (dev2)
642 pci_release_selected_regions(dev2, bars);
643 pci_release_selected_regions(dev, bars);
644
645 if (dev2)
646 pci_disable_device(dev2);
647 pci_disable_device(dev);
648}
649EXPORT_SYMBOL_GPL(ide_pci_remove);
650
651#ifdef CONFIG_PM
/**
 * ide_pci_suspend - default PCI suspend handler for IDE hosts
 * @dev:	PCI device
 * @state:	power-management transition target
 *
 * Save the device's config space, disable it, then move it to the
 * power state chosen for @state.  Always returns 0.
 */
int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	pci_save_state(dev);
	pci_disable_device(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));

	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_suspend);
661
/**
 * ide_pci_resume - default PCI resume handler for IDE hosts
 * @dev:	PCI device
 *
 * Power the device back to D0, re-enable it, restore its saved config
 * space and bus mastering, then re-run the host's chipset init hook
 * if one is registered.
 *
 * Returns 0 on success or the error from pci_enable_device().
 */
int ide_pci_resume(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	int rc;

	pci_set_power_state(dev, PCI_D0);

	rc = pci_enable_device(dev);
	if (rc)
		return rc;

	pci_restore_state(dev);
	pci_set_master(dev);

	/* Chipset state is lost across suspend; reprogram it. */
	if (host->init_chipset)
		host->init_chipset(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_resume);
682#endif
683