   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
   4 *           Due to massive hardware bugs, UltraDMA is only supported
   5 *           on the 646U2 and not on the 646U.
   6 *
   7 * Copyright (C) 1998           Eddie C. Dost  (ecd@skynet.be)
   8 * Copyright (C) 1998           David S. Miller (davem@redhat.com)
   9 *
  10 * Copyright (C) 1999-2002      Andre Hedrick <andre@linux-ide.org>
  11 * Copyright (C) 2007-2010      Bartlomiej Zolnierkiewicz
  12 * Copyright (C) 2007,2009      MontaVista Software, Inc. <source@mvista.com>
  13 */
  14
  15#include <linux/module.h>
  16#include <linux/types.h>
  17#include <linux/pci.h>
  18#include <linux/ide.h>
  19#include <linux/init.h>
  20
  21#include <asm/io.h>
  22
  23#define DRV_NAME "cmd64x"
  24
/*
 * CMD64x specific registers definition.
 */
#define CFR		0x50		/* primary channel config/status */
#define   CFR_INTR_CH0		0x04	/* ch0 interrupt status; written back as 1 to clear */

#define CMDTIM		0x52
#define ARTTIM0		0x53		/* drive 0 address setup timing */
#define DRWTIM0		0x54		/* drive 0 active/recovery timing */
#define ARTTIM1		0x55		/* drive 1 address setup timing */
#define DRWTIM1		0x56		/* drive 1 active/recovery timing */
#define ARTTIM23	0x57		/* shared address setup timing for drives 2/3 */
#define   ARTTIM23_DIS_RA2	0x04
#define   ARTTIM23_DIS_RA3	0x08
#define   ARTTIM23_INTR_CH1	0x10	/* ch1 interrupt status; written back as 1 to clear */
#define DRWTIM2		0x58		/* drive 2 active/recovery timing */
#define BRST		0x59
#define DRWTIM3		0x5b		/* drive 3 active/recovery timing */

#define BMIDECR0	0x70
#define MRDMODE		0x71		/* read-mode select + irq status (also at BAR4+1) */
#define   MRDMODE_INTR_CH0	0x04
#define   MRDMODE_INTR_CH1	0x08
#define UDIDETCR0	0x73		/* UDMA control, primary channel */
#define DTPR0		0x74
#define BMIDECR1	0x78
#define BMIDECSR	0x79		/* cable detect bits (CMD648/649 only) */
#define UDIDETCR1	0x7B		/* UDMA control, secondary channel */
#define DTPR1		0x7C
  54
/*
 * Compute PIO/MWDMA timings for @drive and program them into the chip.
 *
 * @drive: target drive; drive->dn (0..3) selects the register pair
 * @mode:  an XFER_PIO_* or XFER_MW_DMA_* transfer mode value
 *
 * Active/recovery clock counts go into the drive's DRWTIM register and
 * the address setup count into the corresponding ARTTIM register.
 */
static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
{
	ide_hwif_t *hwif = drive->hwif;
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	int bus_speed = ide_pci_clk ? ide_pci_clk : 33;	/* PCI clock in MHz, assume 33 if unset */
	const unsigned long T = 1000000 / bus_speed;	/* clock period, in the units ide_timing_compute() expects */
	/*
	 * Hardware encoding of recovery clocks, indexed by clock count:
	 * indices 0 and 1 both map to the minimum-like value 15, and 16
	 * clocks is encoded as 0.
	 */
	static const u8 recovery_values[] =
		{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
	/* address setup encodings for 0..5 clocks, placed in ARTTIM bits 7:6 */
	static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
	static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
	static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
	struct ide_timing t;
	u8 arttim = 0;

	/* only four drive positions have timing registers */
	if (drive->dn >= ARRAY_SIZE(drwtim_regs))
		return;

	ide_timing_compute(drive, mode, &t, T, 0);

	/*
	 * In case we've got too long recovery phase, try to lengthen
	 * the active phase
	 */
	if (t.recover > 16) {
		t.active += t.recover - 16;
		t.recover = 16;
	}
	if (t.active > 16)		/* shouldn't actually happen... */
		t.active = 16;

	/*
	 * Convert values to internal chipset representation
	 */
	t.recover = recovery_values[t.recover];
	t.active &= 0x0f;		/* 16 clocks is encoded as 0 */

	/* Program the active/recovery counts into the DRWTIM register */
	pci_write_config_byte(dev, drwtim_regs[drive->dn],
			      (t.active << 4) | t.recover);

	/*
	 * The primary channel has individual address setup timing registers
	 * for each drive and the hardware selects the slowest timing itself.
	 * The secondary channel has one common register and we have to select
	 * the slowest address setup timing ourselves.
	 */
	if (hwif->channel) {
		ide_drive_t *pair = ide_get_pair_dev(drive);

		if (pair) {
			struct ide_timing tp;

			/* widen t.setup to also satisfy the pair's PIO (and DMA) needs */
			ide_timing_compute(pair, pair->pio_mode, &tp, T, 0);
			ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
			if (pair->dma_mode) {
				ide_timing_compute(pair, pair->dma_mode,
						&tp, T, 0);
				ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
			}
		}
	}

	if (t.setup > 5)		/* shouldn't actually happen... */
		t.setup = 5;

	/*
	 * Program the address setup clocks into the ARTTIM registers.
	 * Avoid clearing the secondary channel's interrupt bit.
	 */
	(void) pci_read_config_byte (dev, arttim_regs[drive->dn], &arttim);
	if (hwif->channel)
		arttim &= ~ARTTIM23_INTR_CH1;	/* writing it back as 1 would ack a pending irq */
	arttim &= ~0xc0;			/* clear the old setup field */
	arttim |= setup_values[t.setup];
	(void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
}
 131
 132/*
 133 * Attempts to set drive's PIO mode.
 134 * Special cases are 8: prefetch off, 9: prefetch on (both never worked)
 135 */
 136
 137static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 138{
 139        const u8 pio = drive->pio_mode - XFER_PIO_0;
 140
 141        /*
 142         * Filter out the prefetch control values
 143         * to prevent PIO5 from being programmed
 144         */
 145        if (pio == 8 || pio == 9)
 146                return;
 147
 148        cmd64x_program_timings(drive, XFER_PIO_0 + pio);
 149}
 150
 151static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 152{
 153        struct pci_dev *dev     = to_pci_dev(hwif->dev);
 154        u8 unit                 = drive->dn & 0x01;
 155        u8 regU = 0, pciU       = hwif->channel ? UDIDETCR1 : UDIDETCR0;
 156        const u8 speed          = drive->dma_mode;
 157
 158        pci_read_config_byte(dev, pciU, &regU);
 159        regU &= ~(unit ? 0xCA : 0x35);
 160
 161        switch(speed) {
 162        case XFER_UDMA_5:
 163                regU |= unit ? 0x0A : 0x05;
 164                break;
 165        case XFER_UDMA_4:
 166                regU |= unit ? 0x4A : 0x15;
 167                break;
 168        case XFER_UDMA_3:
 169                regU |= unit ? 0x8A : 0x25;
 170                break;
 171        case XFER_UDMA_2:
 172                regU |= unit ? 0x42 : 0x11;
 173                break;
 174        case XFER_UDMA_1:
 175                regU |= unit ? 0x82 : 0x21;
 176                break;
 177        case XFER_UDMA_0:
 178                regU |= unit ? 0xC2 : 0x31;
 179                break;
 180        case XFER_MW_DMA_2:
 181        case XFER_MW_DMA_1:
 182        case XFER_MW_DMA_0:
 183                cmd64x_program_timings(drive, speed);
 184                break;
 185        }
 186
 187        pci_write_config_byte(dev, pciU, regU);
 188}
 189
 190static void cmd648_clear_irq(ide_drive_t *drive)
 191{
 192        ide_hwif_t *hwif        = drive->hwif;
 193        struct pci_dev *dev     = to_pci_dev(hwif->dev);
 194        unsigned long base      = pci_resource_start(dev, 4);
 195        u8  irq_mask            = hwif->channel ? MRDMODE_INTR_CH1 :
 196                                                  MRDMODE_INTR_CH0;
 197        u8  mrdmode             = inb(base + 1);
 198
 199        /* clear the interrupt bit */
 200        outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
 201             base + 1);
 202}
 203
 204static void cmd64x_clear_irq(ide_drive_t *drive)
 205{
 206        ide_hwif_t *hwif        = drive->hwif;
 207        struct pci_dev *dev     = to_pci_dev(hwif->dev);
 208        int irq_reg             = hwif->channel ? ARTTIM23 : CFR;
 209        u8  irq_mask            = hwif->channel ? ARTTIM23_INTR_CH1 :
 210                                                  CFR_INTR_CH0;
 211        u8  irq_stat            = 0;
 212
 213        (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
 214        /* clear the interrupt bit */
 215        (void) pci_write_config_byte(dev, irq_reg, irq_stat | irq_mask);
 216}
 217
 218static int cmd648_test_irq(ide_hwif_t *hwif)
 219{
 220        struct pci_dev *dev     = to_pci_dev(hwif->dev);
 221        unsigned long base      = pci_resource_start(dev, 4);
 222        u8 irq_mask             = hwif->channel ? MRDMODE_INTR_CH1 :
 223                                                  MRDMODE_INTR_CH0;
 224        u8 mrdmode              = inb(base + 1);
 225
 226        pr_debug("%s: mrdmode: 0x%02x irq_mask: 0x%02x\n",
 227                 hwif->name, mrdmode, irq_mask);
 228
 229        return (mrdmode & irq_mask) ? 1 : 0;
 230}
 231
 232static int cmd64x_test_irq(ide_hwif_t *hwif)
 233{
 234        struct pci_dev *dev     = to_pci_dev(hwif->dev);
 235        int irq_reg             = hwif->channel ? ARTTIM23 : CFR;
 236        u8  irq_mask            = hwif->channel ? ARTTIM23_INTR_CH1 :
 237                                                  CFR_INTR_CH0;
 238        u8  irq_stat            = 0;
 239
 240        (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
 241
 242        pr_debug("%s: irq_stat: 0x%02x irq_mask: 0x%02x\n",
 243                 hwif->name, irq_stat, irq_mask);
 244
 245        return (irq_stat & irq_mask) ? 1 : 0;
 246}
 247
 248/*
 249 * ASUS P55T2P4D with CMD646 chipset revision 0x01 requires the old
 250 * event order for DMA transfers.
 251 */
 252
 253static int cmd646_1_dma_end(ide_drive_t *drive)
 254{
 255        ide_hwif_t *hwif = drive->hwif;
 256        u8 dma_stat = 0, dma_cmd = 0;
 257
 258        /* get DMA status */
 259        dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
 260        /* read DMA command state */
 261        dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
 262        /* stop DMA */
 263        outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
 264        /* clear the INTR & ERROR bits */
 265        outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
 266        /* verify good DMA status */
 267        return (dma_stat & 7) != 4;
 268}
 269
 270static int init_chipset_cmd64x(struct pci_dev *dev)
 271{
 272        u8 mrdmode = 0;
 273
 274        /* Set a good latency timer and cache line size value. */
 275        (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
 276        /* FIXME: pci_set_master() to ensure a good latency timer value */
 277
 278        /*
 279         * Enable interrupts, select MEMORY READ LINE for reads.
 280         *
 281         * NOTE: although not mentioned in the PCI0646U specs,
 282         * bits 0-1 are write only and won't be read back as
 283         * set or not -- PCI0646U2 specs clarify this point.
 284         */
 285        (void) pci_read_config_byte (dev, MRDMODE, &mrdmode);
 286        mrdmode &= ~0x30;
 287        (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
 288
 289        return 0;
 290}
 291
 292static u8 cmd64x_cable_detect(ide_hwif_t *hwif)
 293{
 294        struct pci_dev  *dev    = to_pci_dev(hwif->dev);
 295        u8 bmidecsr = 0, mask   = hwif->channel ? 0x02 : 0x01;
 296
 297        switch (dev->device) {
 298        case PCI_DEVICE_ID_CMD_648:
 299        case PCI_DEVICE_ID_CMD_649:
 300                pci_read_config_byte(dev, BMIDECSR, &bmidecsr);
 301                return (bmidecsr & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 302        default:
 303                return ATA_CBL_PATA40;
 304        }
 305}
 306
/*
 * Port ops for chips whose interrupt status is read/cleared through
 * PCI config space (CFR / ARTTIM23): CMD643 and pre-rev-3 CMD646.
 */
static const struct ide_port_ops cmd64x_port_ops = {
	.set_pio_mode		= cmd64x_set_pio_mode,
	.set_dma_mode		= cmd64x_set_dma_mode,
	.clear_irq		= cmd64x_clear_irq,
	.test_irq		= cmd64x_test_irq,
	.cable_detect		= cmd64x_cable_detect,
};
 314
/*
 * Port ops for chips whose interrupt status is read/cleared through
 * the I/O-mapped MRDMODE mirror behind BAR 4 (CMD646/648/649).
 */
static const struct ide_port_ops cmd648_port_ops = {
	.set_pio_mode		= cmd64x_set_pio_mode,
	.set_dma_mode		= cmd64x_set_dma_mode,
	.clear_irq		= cmd648_clear_irq,
	.test_irq		= cmd648_test_irq,
	.cable_detect		= cmd64x_cable_detect,
};
 322
/*
 * DMA ops for CMD646 revision 0x01 only: identical to the generic SFF
 * ops except for dma_end, which uses the old event order required by
 * that stepping (see cmd646_1_dma_end()).
 */
static const struct ide_dma_ops cmd646_rev1_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= ide_dma_start,
	.dma_end		= cmd646_1_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};
 333
/*
 * Per-chip capabilities, indexed by the driver_data field of
 * cmd64x_pci_tbl[].  The CMD646 entry is further adjusted at probe
 * time based on the chip revision (see cmd64x_init_one()).
 */
static const struct ide_port_info cmd64x_chipsets[] = {
	{	/* 0: CMD643 — no UDMA, both channels share one IRQ line */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		/* no primary channel enable bit on this chip */
		.enablebits	= {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
		.port_ops	= &cmd64x_port_ops,
		.host_flags	= IDE_HFLAG_CLEAR_SIMPLEX |
				  IDE_HFLAG_ABUSE_PREFETCH |
				  IDE_HFLAG_SERIALIZE,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= 0x00, /* no udma */
	},
	{	/* 1: CMD646 — UDMA2 on U/U2 steppings only */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
		.port_ops	= &cmd648_port_ops,
		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH |
				  IDE_HFLAG_SERIALIZE,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA2,
	},
	{	/* 2: CMD648 — up to UDMA4, supports cable detection */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
		.port_ops	= &cmd648_port_ops,
		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA4,
	},
	{	/* 3: CMD649 — up to UDMA5, supports cable detection */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
		.port_ops	= &cmd648_port_ops,
		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	}
};
 379
 380static int cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 381{
 382        struct ide_port_info d;
 383        u8 idx = id->driver_data;
 384
 385        d = cmd64x_chipsets[idx];
 386
 387        if (idx == 1) {
 388                /*
 389                 * UltraDMA only supported on PCI646U and PCI646U2, which
 390                 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
 391                 * Actually, although the CMD tech support people won't
 392                 * tell me the details, the 0x03 revision cannot support
 393                 * UDMA correctly without hardware modifications, and even
 394                 * then it only works with Quantum disks due to some
 395                 * hold time assumptions in the 646U part which are fixed
 396                 * in the 646U2.
 397                 *
 398                 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
 399                 */
 400                if (dev->revision < 5) {
 401                        d.udma_mask = 0x00;
 402                        /*
 403                         * The original PCI0646 didn't have the primary
 404                         * channel enable bit, it appeared starting with
 405                         * PCI0646U (i.e. revision ID 3).
 406                         */
 407                        if (dev->revision < 3) {
 408                                d.enablebits[0].reg = 0;
 409                                d.port_ops = &cmd64x_port_ops;
 410                                if (dev->revision == 1)
 411                                        d.dma_ops = &cmd646_rev1_dma_ops;
 412                        }
 413                }
 414        }
 415
 416        return ide_pci_init_one(dev, &d, NULL);
 417}
 418
/* PCI IDs handled by this driver; driver_data indexes cmd64x_chipsets[]. */
static const struct pci_device_id cmd64x_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 2 },
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 3 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, cmd64x_pci_tbl);
 427
/* PCI driver glue; remove/suspend/resume use the generic IDE helpers. */
static struct pci_driver cmd64x_pci_driver = {
	.name		= "CMD64x_IDE",
	.id_table	= cmd64x_pci_tbl,
	.probe		= cmd64x_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};
 436
/* Module entry point: register the PCI driver with the IDE core. */
static int __init cmd64x_ide_init(void)
{
	return ide_pci_register_driver(&cmd64x_pci_driver);
}
 441
/* Module exit point: unregister the PCI driver. */
static void __exit cmd64x_ide_exit(void)
{
	pci_unregister_driver(&cmd64x_pci_driver);
}
 446
module_init(cmd64x_ide_init);
module_exit(cmd64x_ide_exit);

/* Module metadata */
MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
MODULE_LICENSE("GPL");
 453