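/*
 * Generic PCI glue for IDE host drivers: device enable and BAR
 * reservation, native-mode switching, port and BM-DMA setup, host
 * registration, removal and power-management helpers.
 */
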
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>

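/**
 *	ide_setup_pci_baseregs	-	place a PCI IDE controller into native mode
 *	@dev: PCI device of the interface to switch
 *	@name: name of the interface
 *
 *	Attempt to place the PCI interface into PCI native mode.  If we
 *	succeed, the BARs are valid and the controller is in native mode.
 *	Returns 0 on success or an errno code if the device cannot be
 *	switched.
 */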
static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
{
	u8 progif = 0;

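	/*
	 * Place both IDE interfaces into PCI "native" mode.
	 */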
	if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
	    (progif & 5) != 5) {
		if ((progif & 0xa) != 0xa) {
			printk(KERN_INFO "%s %s: device not capable of full "
				"native PCI mode\n", name, pci_name(dev));
			return -EOPNOTSUPP;
		}
		printk(KERN_INFO "%s %s: placing both ports into native PCI "
			"mode\n", name, pci_name(dev));
		(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
		if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
		    (progif & 5) != 5) {
			printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
				"wanted 0x%04x, got 0x%04x\n",
				name, pci_name(dev), progif | 5, progif);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
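/*
 * Try to clear the simplex bit (bit 7) in the BM-DMA status register,
 * preserving the drive DMA-capable bits.  Returns 1 if the simplex bit
 * refuses to clear (the controller really is simplex-only), 0 otherwise.
 */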
static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
{
	u8 dma_stat = inb(dma_base + 2);

	outb(dma_stat & 0x60, dma_base + 2);
	dma_stat = inb(dma_base + 2);

	return (dma_stat & 0x80) ? 1 : 0;
}

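/**
 *	ide_pci_dma_base	-	fetch the DMA base address
 *	@hwif: IDE interface
 *	@d: IDE port info
 *
 *	Return the BM-DMA base I/O address for the interface: either the
 *	already known MMIO base, the mate's base adjusted for this channel,
 *	or the address taken from the appropriate PCI BAR (BAR4, or BAR2
 *	for CS5520-style controllers).  Returns 0 if no valid base is found.
 */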
unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long dma_base = 0;

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		return hwif->dma_base;

	if (hwif->mate && hwif->mate->dma_base) {
		dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
	} else {
		u8 baridx = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;

		dma_base = pci_resource_start(dev, baridx);

		if (dma_base == 0) {
			printk(KERN_ERR "%s %s: DMA base is invalid\n",
				d->name, pci_name(dev));
			return 0;
		}
	}

	if (hwif->channel)
		dma_base += 8;

	return dma_base;
}
EXPORT_SYMBOL_GPL(ide_pci_dma_base);

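/**
 *	ide_pci_check_simplex	-	check for simplex-only DMA
 *	@hwif: IDE interface
 *	@d: IDE port info
 *
 *	Handle controllers whose BM-DMA engine claims to be simplex-only:
 *	either clear the simplex bit (IDE_HFLAG_CLEAR_SIMPLEX) or, if both
 *	channels want DMA, refuse it for this interface.  Returns 0 if DMA
 *	may be used, -1 otherwise.
 */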
int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u8 dma_stat;

	if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
		goto out;

	if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
		if (ide_pci_clear_simplex(hwif->dma_base, d->name))
			printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
				d->name, pci_name(dev));
		goto out;
	}

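	/*
	 * If the device claims "simplex" DMA, only one of the two channels
	 * can use DMA at a time.  We cannot yet tell which channel should
	 * own it (the drives have not been probed), so if the simplex bit
	 * is set and the mate channel already has DMA, refuse DMA here.
	 */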
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
	if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
		printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
			d->name, pci_name(dev));
		return -1;
	}
out:
	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_check_simplex);

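/**
 *	ide_pci_set_master	-	enable bus mastering
 *	@dev: PCI device
 *	@name: device name
 *
 *	Make sure bus mastering is enabled in the PCI command register
 *	(the firmware should normally have done this already).  Returns 0
 *	on success or -EIO if the command register will not update.
 */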
int ide_pci_set_master(struct pci_dev *dev, const char *name)
{
	u16 pcicmd;

	pci_read_config_word(dev, PCI_COMMAND, &pcicmd);

	if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
		pci_set_master(dev);

		if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
		    (pcicmd & PCI_COMMAND_MASTER) == 0) {
			printk(KERN_ERR "%s %s: error updating PCICMD\n",
				name, pci_name(dev));
			return -EIO;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_set_master);
#endif

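/**
 *	ide_setup_pci_noise	-	announce an IDE controller
 *	@dev: PCI device
 *	@d: IDE port info
 *
 *	Print the vendor/device IDs and revision of the controller that is
 *	about to be set up.
 */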
void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
{
	printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
		d->name, pci_name(dev),
		dev->vendor, dev->device, dev->revision);
}
EXPORT_SYMBOL_GPL(ide_setup_pci_noise);

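/**
 *	ide_pci_enable	-	do PCI enables
 *	@dev: PCI device
 *	@d: IDE port info
 *
 *	Enable the IDE PCI device.  We attempt to enable the device fully,
 *	and if that fails we fall back to enabling I/O space only, since
 *	the firmware may already have set up the resources for a controller
 *	in legacy mode.  A 32-bit DMA mask is set and the BARs the driver
 *	will use are reserved.
 *
 *	Returns zero on success or an error code.
 */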
static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
{
	int ret, bars;

	if (pci_enable_device(dev)) {
		ret = pci_enable_device_io(dev);
		if (ret < 0) {
			printk(KERN_WARNING "%s %s: couldn't enable device\n",
				d->name, pci_name(dev));
			goto out;
		}
		printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
			d->name, pci_name(dev));
	}

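	/*
	 * Assume all devices can do 32-bit DMA for now.  A per-device DMA
	 * mask could be added to struct ide_port_info if a controller ever
	 * needs something different.
	 */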
	ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
	if (ret < 0) {
		printk(KERN_ERR "%s %s: can't set DMA mask\n",
			d->name, pci_name(dev));
		goto out;
	}

	if (d->host_flags & IDE_HFLAG_SINGLE)
		bars = (1 << 2) - 1;
	else
		bars = (1 << 4) - 1;

	if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
		if (d->host_flags & IDE_HFLAG_CS5520)
			bars |= (1 << 2);
		else
			bars |= (1 << 4);
	}

	ret = pci_request_selected_regions(dev, bars, d->name);
	if (ret < 0)
		printk(KERN_ERR "%s %s: can't reserve resources\n",
			d->name, pci_name(dev));
out:
	return ret;
}

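/**
 *	ide_pci_configure	-	configure an unconfigured device
 *	@dev: PCI device
 *	@d: IDE port info
 *
 *	Enable and configure a PCI IDE controller that the firmware left
 *	disabled: switch it to native mode where needed and turn on I/O
 *	space decoding.  Returns zero on success or an error code.
 */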
static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
{
	u16 pcicmd = 0;

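	/*
	 * The firmware was supposed to set this device up but left it
	 * disabled.  Try to enable it ourselves: switch to native mode if
	 * required, turn on I/O decoding, then verify the result.
	 */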
	if (ide_setup_pci_baseregs(dev, d->name) ||
	    pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
		printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
			d->name, pci_name(dev));
		return -ENODEV;
	}
	if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
		printk(KERN_ERR "%s %s: error accessing PCI regs\n",
			d->name, pci_name(dev));
		return -EIO;
	}
	if (!(pcicmd & PCI_COMMAND_IO)) {
		printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
			d->name, pci_name(dev));
		return -ENXIO;
	}
	return 0;
}

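/**
 *	ide_pci_check_iomem	-	check a PCI BAR for I/O space
 *	@dev: PCI device
 *	@d: IDE port info
 *	@bar: BAR number
 *
 *	Check that a BAR, if configured, decodes I/O space rather than
 *	memory space.  Returns 0 if the BAR is unconfigured or is I/O,
 *	-EINVAL if it is memory mapped.
 */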
static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
			       int bar)
{
	ulong flags = pci_resource_flags(dev, bar);

	/* Unconfigured? */
	if (!flags || pci_resource_len(dev, bar) == 0)
		return 0;

	/* I/O space */
	if (flags & IORESOURCE_IO)
		return 0;

	/* Memory-mapped: not usable here */
	return -EINVAL;
}

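/**
 *	ide_hw_configure	-	fill in the hw structure for one port
 *	@dev: PCI device holding the interface
 *	@d: IDE port info
 *	@port: port number (0 or 1)
 *	@hw: hw structure to fill in
 *
 *	Validate the BARs for the port (or use the legacy ISA addresses for
 *	controllers flagged IDE_HFLAG_ISA_PORTS) and initialize @hw with
 *	the resulting I/O and control block addresses.  Returns 0 on
 *	success or -EINVAL if the port cannot be used.
 */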
static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
			    unsigned int port, struct ide_hw *hw)
{
	unsigned long ctl = 0, base = 0;

	if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
		if (ide_pci_check_iomem(dev, d, 2 * port) ||
		    ide_pci_check_iomem(dev, d, 2 * port + 1)) {
			printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
				"reported as MEM for port %d!\n",
				d->name, pci_name(dev), port);
			return -EINVAL;
		}

		ctl = pci_resource_start(dev, 2 * port + 1);
		base = pci_resource_start(dev, 2 * port);
	} else {
		/* Use the standard legacy ISA port ranges */
		ctl = port ? 0x374 : 0x3f4;
		base = port ? 0x170 : 0x1f0;
	}

	if (!base || !ctl) {
		printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
			d->name, pci_name(dev), port);
		return -EINVAL;
	}

	memset(hw, 0, sizeof(*hw));
	hw->dev = &dev->dev;
	ide_std_init_ports(hw, base, ctl | 2);

	return 0;
}

#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
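/**
 *	ide_hwif_setup_dma	-	configure the DMA interface
 *	@hwif: IDE interface
 *	@d: IDE port info
 *
 *	If autodma is allowed (or the controller advertises bus-master
 *	capability in its class code), fetch the DMA base, hook up the
 *	default SFF DMA operations, handle simplex devices, enable bus
 *	mastering and allocate the DMA engine.  Returns 0 on success or -1
 *	on failure.
 */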
int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
	    ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
	     (dev->class & 0x80))) {
		unsigned long base = ide_pci_dma_base(hwif, d);

		if (base == 0)
			return -1;

		hwif->dma_base = base;

		if (hwif->dma_ops == NULL)
			hwif->dma_ops = &sff_dma_ops;

		if (ide_pci_check_simplex(hwif, d) < 0)
			return -1;

		if (ide_pci_set_master(dev, d->name) < 0)
			return -1;

		if (hwif->host_flags & IDE_HFLAG_MMIO)
			printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
		else
			printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
				hwif->name, base, base + 7);

		hwif->extra_base = base + (hwif->channel ? 8 : 16);

		if (ide_allocate_dma_engine(hwif))
			return -1;
	}

	return 0;
}
#endif

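/**
 *	ide_setup_pci_controller	-	set up an IDE PCI device
 *	@dev: PCI device
 *	@d: IDE port info
 *	@noisy: verbose flag
 *
 *	Set up a PCI IDE controller: enable the device and, if it is not
 *	already decoding I/O space, configure it ourselves.  Returns zero
 *	on success or an error code.
 */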
static int ide_setup_pci_controller(struct pci_dev *dev,
				    const struct ide_port_info *d, int noisy)
{
	int ret;
	u16 pcicmd;

	if (noisy)
		ide_setup_pci_noise(dev, d);

	ret = ide_pci_enable(dev, d);
	if (ret < 0)
		goto out;

	ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
	if (ret < 0) {
		printk(KERN_ERR "%s %s: error accessing PCI regs\n",
			d->name, pci_name(dev));
		goto out;
	}
	if (!(pcicmd & PCI_COMMAND_IO)) {
		ret = ide_pci_configure(dev, d);
		if (ret < 0)
			goto out;
		printk(KERN_INFO "%s %s: device enabled (Linux)\n",
			d->name, pci_name(dev));
	}

out:
	return ret;
}

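/**
 *	ide_pci_setup_ports	-	configure the ports of a PCI IDE controller
 *	@dev: PCI device
 *	@d: IDE port info
 *	@hw: hw structures to fill in (one per port)
 *	@hws: array of hw pointers to publish
 *
 *	For each channel of the controller, check the per-port enable bits
 *	described by @d and, if the port is enabled, fill in its hw
 *	structure and publish it through @hws.  Ports that are disabled or
 *	misconfigured are skipped.
 */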
void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
			 struct ide_hw *hw, struct ide_hw **hws)
{
	int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
	u8 tmp;

	/*
	 * Set up the IDE ports
	 */
	for (port = 0; port < channels; ++port) {
		const struct ide_pci_enablebit *e = &d->enablebits[port];

		if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
		    (tmp & e->mask) != e->val)) {
			printk(KERN_INFO "%s %s: IDE port disabled\n",
				d->name, pci_name(dev));
			continue;	/* port not enabled */
		}

		if (ide_hw_configure(dev, d, port, hw + port))
			continue;

		*(hws + port) = hw + port;
	}
}
EXPORT_SYMBOL_GPL(ide_pci_setup_ports);

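/**
 *	do_ide_setup_pci_device	-	set up a PCI IDE device
 *	@dev: PCI device
 *	@d: IDE port info
 *	@noisy: verbose flag
 *
 *	Run the chipset-specific init hook and work out which IRQ the host
 *	will use: controllers in compatibility mode have their IRQ probed
 *	later (0 is returned), otherwise dev->irq is used.  Returns the IRQ
 *	number or a negative error code.
 */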
static int do_ide_setup_pci_device(struct pci_dev *dev,
				   const struct ide_port_info *d,
				   u8 noisy)
{
	int pciirq, ret;

	/* Can we trust the IRQ the PCI layer reports for this device? */
	pciirq = dev->irq;

	/*
	 * Run the chipset-specific init hook first; it may enable an
	 * offboard BIOS, fix up interrupt routing or otherwise prepare
	 * the controller before we commit to an IRQ.
	 */
	ret = d->init_chipset ? d->init_chipset(dev) : 0;
	if (ret < 0)
		goto out;

	if (ide_pci_is_in_compatibility_mode(dev)) {
		if (noisy)
			printk(KERN_INFO "%s %s: not 100%% native mode: will "
				"probe irqs later\n", d->name, pci_name(dev));
		pciirq = 0;
	} else if (!pciirq && noisy) {
		printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
			d->name, pci_name(dev), pciirq);
	} else if (noisy) {
		printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
			d->name, pci_name(dev), pciirq);
	}

	ret = pciirq;
out:
	return ret;
}

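/**
 *	ide_pci_init_two	-	set up one or two PCI IDE controllers
 *	@dev1: first PCI device
 *	@dev2: second PCI device, or NULL
 *	@d: IDE port info
 *	@priv: host-private data
 *
 *	Enable and configure the given controller(s), allocate and register
 *	an IDE host for their ports, and resolve the IRQ for each channel
 *	(falling back to the legacy IDE IRQs for controllers in
 *	compatibility mode).  Returns 0 on success or an error code.
 */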
int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
		     const struct ide_port_info *d, void *priv)
{
	struct pci_dev *pdev[] = { dev1, dev2 };
	struct ide_host *host;
	int ret, i, n_ports = dev2 ? 4 : 2;
	struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };

	for (i = 0; i < n_ports / 2; i++) {
		ret = ide_setup_pci_controller(pdev[i], d, !i);
		if (ret < 0)
			goto out;

		ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
	}

	host = ide_host_alloc(d, hws, n_ports);
	if (host == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	host->dev[0] = &dev1->dev;
	if (dev2)
		host->dev[1] = &dev2->dev;

	host->host_priv = priv;
	host->irq_flags = IRQF_SHARED;

	pci_set_drvdata(pdev[0], host);
	if (dev2)
		pci_set_drvdata(pdev[1], host);

	for (i = 0; i < n_ports / 2; i++) {
		ret = do_ide_setup_pci_device(pdev[i], d, !i);

		/*
		 * Note: there is no helper to undo do_ide_setup_pci_device()
		 * on the first device if setting up the second one fails.
		 */
		if (ret < 0)
			goto out;

		/* fix up the IRQ for compatibility-mode controllers */
		if (ide_pci_is_in_compatibility_mode(pdev[i])) {
			hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0);
			hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1);
		} else
			hw[i*2 + 1].irq = hw[i*2].irq = ret;
	}

	ret = ide_host_register(host, d, hws);
	if (ret)
		ide_host_free(host);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ide_pci_init_two);

int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
		     void *priv)
{
	return ide_pci_init_two(dev, NULL, d, priv);
}
EXPORT_SYMBOL_GPL(ide_pci_init_one);

void ide_pci_remove(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
	int bars;

	/*
	 * Compute the same BAR mask that ide_pci_enable() requested so the
	 * regions can be released symmetrically.
	 */
	if (host->host_flags & IDE_HFLAG_SINGLE)
		bars = (1 << 2) - 1;
	else
		bars = (1 << 4) - 1;

	if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
		if (host->host_flags & IDE_HFLAG_CS5520)
			bars |= (1 << 2);
		else
			bars |= (1 << 4);
	}

	ide_host_remove(host);

	if (dev2)
		pci_release_selected_regions(dev2, bars);
	pci_release_selected_regions(dev, bars);

	if (dev2)
		pci_disable_device(dev2);
	pci_disable_device(dev);
}
EXPORT_SYMBOL_GPL(ide_pci_remove);

#ifdef CONFIG_PM
int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	pci_save_state(dev);
	pci_disable_device(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));

	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_suspend);

int ide_pci_resume(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	int rc;

	pci_set_power_state(dev, PCI_D0);

	rc = pci_enable_device(dev);
	if (rc)
		return rc;

	pci_restore_state(dev);
	pci_set_master(dev);

	if (host->init_chipset)
		host->init_chipset(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_resume);
#endif