linux/drivers/ide/ide-dma.c
/*
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 *  Special thanks to Mark for his six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

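/*
 * Drives in drive_whitelist[] are known to work reliably with DMA;
 * drives in drive_blacklist[] are known to misbehave when (U)DMA is
 * enabled.  A non-NULL second field limits an entry to that firmware
 * revision only.
 */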
static const struct drive_list_entry drive_whitelist[] = {
        { "Micropolis 2112A",                   NULL            },
        { "CONNER CTMA 4000",                   NULL            },
        { "CONNER CTT8000-A",                   NULL            },
        { "ST34342A",                           NULL            },
        { NULL,                                 NULL            }
};

static const struct drive_list_entry drive_blacklist[] = {
        { "WDC AC11000H",                       NULL            },
        { "WDC AC22100H",                       NULL            },
        { "WDC AC32500H",                       NULL            },
        { "WDC AC33100H",                       NULL            },
        { "WDC AC31600H",                       NULL            },
        { "WDC AC32100H",                       "24.09P07"      },
        { "WDC AC23200L",                       "21.10N21"      },
        { "Compaq CRD-8241B",                   NULL            },
        { "CRD-8400B",                          NULL            },
        { "CRD-8480B",                          NULL            },
        { "CRD-8482B",                          NULL            },
        { "CRD-84",                             NULL            },
        { "SanDisk SDP3B",                      NULL            },
        { "SanDisk SDP3B-64",                   NULL            },
        { "SANYO CD-ROM CRD",                   NULL            },
        { "HITACHI CDR-8",                      NULL            },
        { "HITACHI CDR-8335",                   NULL            },
        { "HITACHI CDR-8435",                   NULL            },
        { "Toshiba CD-ROM XM-6202B",            NULL            },
        { "TOSHIBA CD-ROM XM-1702BC",           NULL            },
        { "CD-532E-A",                          NULL            },
        { "E-IDE CD-ROM CR-840",                NULL            },
        { "CD-ROM Drive/F5A",                   NULL            },
        { "WPI CDD-820",                        NULL            },
        { "SAMSUNG CD-ROM SC-148C",             NULL            },
        { "SAMSUNG CD-ROM SC",                  NULL            },
        { "ATAPI CD-ROM DRIVE 40X MAXIMUM",     NULL            },
        { "_NEC DV5800A",                       NULL            },
        { "SAMSUNG CD-ROM SN-124",              "N001"          },
        { "Seagate STT20000A",                  NULL            },
        { "CD-ROM CDR_U200",                    "1.09"          },
        { NULL,                                 NULL            }
};

/**
 *      ide_dma_intr    -       IDE DMA interrupt handler
 *      @drive: the drive the interrupt is for
 *
 *      Handle an interrupt completing a read/write DMA transfer on an
 *      IDE device.
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        u8 stat = 0, dma_stat = 0;

        drive->waiting_for_dma = 0;
        dma_stat = hwif->dma_ops->dma_end(drive);
        ide_dma_unmap_sg(drive, cmd);
        stat = hwif->tp_ops->read_status(hwif);

        if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
                if (!dma_stat) {
                        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                                ide_finish_cmd(drive, cmd, stat);
                        else
                                ide_complete_rq(drive, 0,
                                                blk_rq_sectors(cmd->rq) << 9);
                        return ide_stopped;
                }
                printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
                        drive->name, __func__, dma_stat);
        }
        return ide_error(drive, "dma_intr", stat);
}

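/**
 *      ide_dma_good_drive      -       check a drive against the DMA whitelist
 *      @drive: IDE device
 *
 *      Returns 1 when the drive appears in drive_whitelist[] above and
 *      is therefore known to behave well with DMA, else 0.
 */
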
int ide_dma_good_drive(ide_drive_t *drive)
{
        return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *      ide_dma_map_sg  -       map IDE scatter gather for DMA I/O
 *      @drive: the drive to map the DMA table for
 *      @cmd: command
 *
 *      Perform the DMA mapping magic necessary to access the source or
 *      target buffers of a request via DMA.  The lower layers of the
 *      kernel provide the necessary cache management so that we can
 *      operate in a portable fashion.
 */

static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        int i;

        if (cmd->tf_flags & IDE_TFLAG_WRITE)
                cmd->sg_dma_direction = DMA_TO_DEVICE;
        else
                cmd->sg_dma_direction = DMA_FROM_DEVICE;

        i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
        if (i) {
                cmd->orig_sg_nents = cmd->sg_nents;
                cmd->sg_nents = i;
        }

        return i;
}

/**
 *      ide_dma_unmap_sg        -       clean up DMA mapping
 *      @drive: the drive to unmap
 *      @cmd: command
 *
 *      Teardown mappings after DMA has completed. This must be called
 *      after the completion of each use of ide_build_dmatable and before
 *      the next use of ide_build_dmatable. Failure to do so will cause
 *      an oops as only one mapping can be live for each target at a given
 *      time.
 */

void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
        ide_hwif_t *hwif = drive->hwif;

        dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
                     cmd->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);

/**
 *      ide_dma_off_quietly     -       Generic DMA kill
 *      @drive: drive to control
 *
 *      Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
        drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
        ide_toggle_bounce(drive, 0);

        drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *      ide_dma_off     -       disable DMA on a device
 *      @drive: drive to disable DMA on
 *
 *      Disable IDE DMA for a device on this IDE controller.
 *      Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
        printk(KERN_INFO "%s: DMA disabled\n", drive->name);
        ide_dma_off_quietly(drive);
}
EXPORT_SYMBOL(ide_dma_off);

/**
 *      ide_dma_on              -       Enable DMA on a device
 *      @drive: drive to enable DMA on
 *
 *      Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
        drive->dev_flags |= IDE_DFLAG_USING_DMA;
        ide_toggle_bounce(drive, 1);

        drive->hwif->dma_ops->dma_host_set(drive, 1);
}

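/**
 *      __ide_dma_bad_drive     -       check a drive against the DMA blacklist
 *      @drive: IDE device
 *
 *      Returns 1 and logs a warning when the drive (optionally matched
 *      by firmware revision) appears in drive_blacklist[] above, else 0.
 */
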
int __ide_dma_bad_drive(ide_drive_t *drive)
{
        u16 *id = drive->id;
        int blacklist = ide_in_drive_list(id, drive_blacklist);

        if (blacklist) {
                printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
                                    drive->name, (char *)&id[ATA_ID_PROD]);
                return blacklist;
        }
        return 0;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

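/*
 * DMA mode classes, fastest first; ide_find_dma_mode() walks this
 * table and stops at the first class that yields a usable mode.
 */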
static const u8 xfer_mode_bases[] = {
        XFER_UDMA_0,
        XFER_MW_DMA_0,
        XFER_SW_DMA_0,
};

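/**
 *      ide_get_mode_mask       -       mask of modes supported by drive and host
 *      @drive: IDE device
 *      @base: transfer mode class (XFER_UDMA_0, XFER_MW_DMA_0 or XFER_SW_DMA_0)
 *      @req_mode: requested transfer mode
 *
 *      Combine the modes the drive advertises in its identify data with
 *      those the host (after any port filter) can handle, for one DMA
 *      class.  UDMA modes above UDMA2 are masked out when a high mode
 *      was requested but no 80-wire cable has been detected.
 */
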
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
        u16 *id = drive->id;
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        unsigned int mask = 0;

        switch (base) {
        case XFER_UDMA_0:
                if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
                        break;
                mask = id[ATA_ID_UDMA_MODES];
                if (port_ops && port_ops->udma_filter)
                        mask &= port_ops->udma_filter(drive);
                else
                        mask &= hwif->ultra_mask;

                /*
                 * Avoid a false cable warning from eighty_ninty_three().
                 */
                if (req_mode > XFER_UDMA_2) {
                        if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
                                mask &= 0x07;
                }
                break;
        case XFER_MW_DMA_0:
                mask = id[ATA_ID_MWDMA_MODES];

                /* Also look for the CF specific MWDMA modes... */
                if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) {
                        u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1;

                        mask |= ((2 << mode) - 1) << 3;
                }

                if (port_ops && port_ops->mdma_filter)
                        mask &= port_ops->mdma_filter(drive);
                else
                        mask &= hwif->mwdma_mask;
                break;
        case XFER_SW_DMA_0:
                mask = id[ATA_ID_SWDMA_MODES];
                if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) {
                        u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

                        /*
                         * If the mode is valid, convert it to the mask
                         * (the maximum allowed mode is XFER_SW_DMA_2).
                         */
                        if (mode <= 2)
                                mask = (2 << mode) - 1;
                }
                mask &= hwif->swdma_mask;
                break;
        default:
                BUG();
                break;
        }

        return mask;
}

/**
 *      ide_find_dma_mode       -       compute DMA speed
 *      @drive: IDE device
 *      @req_mode: requested mode
 *
 *      Checks the drive/host capabilities and finds the speed to use for
 *      the DMA transfer.  The speed is then limited by the requested mode.
 *
 *      Returns 0 if the drive/host combination is incapable of DMA transfers
 *      or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
        ide_hwif_t *hwif = drive->hwif;
        unsigned int mask;
        int x, i;
        u8 mode = 0;

        if (drive->media != ide_disk) {
                if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
                        return 0;
        }

        for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
                if (req_mode < xfer_mode_bases[i])
                        continue;
                mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
                x = fls(mask) - 1;
                if (x >= 0) {
                        mode = xfer_mode_bases[i] + x;
                        break;
                }
        }

        if (hwif->chipset == ide_acorn && mode == 0) {
                /*
                 * is this correct?
                 */
                if (ide_dma_good_drive(drive) &&
                    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
                        mode = XFER_MW_DMA_1;
        }

        mode = min(mode, req_mode);

        printk(KERN_INFO "%s: %s mode selected\n", drive->name,
                          mode ? ide_xfer_verbose(mode) : "no DMA");

        return mode;
}

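/**
 *      ide_tune_dma    -       try to program a drive for DMA
 *      @drive: IDE device
 *
 *      Check that the drive reports DMA support, is neither blacklisted
 *      nor administratively disabled, then program the best DMA mode
 *      available (or trust the BIOS setup when the host flags say so).
 *      Returns 1 if DMA was configured, 0 otherwise.
 */
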
static int ide_tune_dma(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 speed;

        if (ata_id_has_dma(drive->id) == 0 ||
            (drive->dev_flags & IDE_DFLAG_NODMA))
                return 0;

        /* consult the list of known "bad" drives */
        if (__ide_dma_bad_drive(drive))
                return 0;

        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return config_drive_for_dma(drive);

        speed = ide_max_dma_mode(drive);

        if (!speed)
                return 0;

        if (ide_set_dma_mode(drive, speed))
                return 0;

        return 1;
}

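/**
 *      ide_dma_check   -       decide between DMA and PIO for a drive
 *      @drive: IDE device
 *
 *      Returns 0 if DMA was successfully tuned; otherwise drops back to
 *      the best PIO mode (unless the BIOS DMA setup is trusted) and
 *      returns -1.
 */
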
static int ide_dma_check(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;

        if (ide_tune_dma(drive))
                return 0;

        /* TODO: always do PIO fallback */
        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return -1;

        ide_set_max_pio(drive);

        return -1;
}

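/**
 *      ide_set_dma     -       reconfigure DMA for a drive
 *      @drive: IDE device
 *
 *      Quietly disable DMA, re-run the DMA checks and re-enable DMA if
 *      they pass.  Returns 0 on success or the ide_dma_check() result
 *      on failure.
 */
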
int ide_set_dma(ide_drive_t *drive)
{
        int rc;

        /*
         * Force DMA off for the beginning of the check.
         * Some chipsets appear to do interesting
         * things if this is not checked and cleared.
         *   PARANOIA!!!
         */
        ide_dma_off_quietly(drive);

        rc = ide_dma_check(drive);
        if (rc)
                return rc;

        ide_dma_on(drive);

        return 0;
}

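/**
 *      ide_check_dma_crc       -       step down the transfer mode after CRC errors
 *      @drive: IDE device
 *
 *      Reset the drive's CRC error count and drop to the next lower
 *      UDMA mode, or force PIO4 for non-UDMA modes (which have no CRC
 *      protection); DMA is then re-enabled only if a DMA mode is still
 *      in effect.
 */
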
void ide_check_dma_crc(ide_drive_t *drive)
{
        u8 mode;

        ide_dma_off_quietly(drive);
        drive->crc_count = 0;
        mode = drive->current_speed;
        /*
         * Don't try non Ultra-DMA modes without iCRC's.  Force the
         * device to PIO and make the user enable SWDMA/MWDMA modes.
         */
        if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
                mode--;
        else
                mode = XFER_PIO_4;
        ide_set_xfer_rate(drive, mode);
        if (drive->current_speed >= XFER_SW_DMA_0)
                ide_dma_on(drive);
}

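/**
 *      ide_dma_lost_irq        -       note a lost DMA interrupt
 *      @drive: IDE device
 *
 *      Generic dma_lost_irq handler: simply log that DMA interrupt
 *      recovery is taking place.
 */
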
void ide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);

/*
 * Un-busy the port etc., and clear any pending DMA status.  We want to
 * retry the current request in PIO mode instead of risking tossing it
 * all away.
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;
        struct ide_cmd *cmd = &hwif->cmd;
        ide_startstop_t ret = ide_stopped;

        /*
         * End the current DMA transaction.
         */

        if (error < 0) {
                printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
                drive->waiting_for_dma = 0;
                (void)dma_ops->dma_end(drive);
                ide_dma_unmap_sg(drive, cmd);
                ret = ide_error(drive, "dma timeout error",
                                hwif->tp_ops->read_status(hwif));
        } else {
                printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
                if (dma_ops->dma_clear)
                        dma_ops->dma_clear(drive);
                printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
                if (dma_ops->dma_test_irq(drive) == 0) {
                        ide_dump_status(drive, "DMA timeout",
                                        hwif->tp_ops->read_status(hwif));
                        drive->waiting_for_dma = 0;
                        (void)dma_ops->dma_end(drive);
                        ide_dma_unmap_sg(drive, cmd);
                }
        }

        /*
         * Disable DMA for now, but remember that we did so because of
         * a timeout -- we'll re-enable it after we finish this next
         * request (or rather the first chunk of it) in PIO.
         */
        drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
        drive->retry_pio++;
        ide_dma_off_quietly(drive);

        /*
         * Make sure the request is sane.
         */
        if (hwif->rq)
                hwif->rq->errors = 0;
        return ret;
}

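/**
 *      ide_release_dma_engine  -       release the PRD table
 *      @hwif: IDE port
 *
 *      Free the coherent memory that was allocated for this port's PRD
 *      (physical region descriptor) table, if any.
 */
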
void ide_release_dma_engine(ide_hwif_t *hwif)
{
        if (hwif->dmatable_cpu) {
                int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

                dma_free_coherent(hwif->dev, prd_size,
                                  hwif->dmatable_cpu, hwif->dmatable_dma);
                hwif->dmatable_cpu = NULL;
        }
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

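/**
 *      ide_allocate_dma_engine -       allocate the PRD table
 *      @hwif: IDE port
 *
 *      Allocate coherent memory for this port's PRD table, falling back
 *      to the default entry count and entry size when the host driver
 *      has not supplied its own.  Returns 0 on success or -ENOMEM.
 */
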
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
        int prd_size;

        if (hwif->prd_max_nents == 0)
                hwif->prd_max_nents = PRD_ENTRIES;
        if (hwif->prd_ent_size == 0)
                hwif->prd_ent_size = PRD_BYTES;

        prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
                                                &hwif->dmatable_dma,
                                                GFP_ATOMIC);
        if (hwif->dmatable_cpu == NULL) {
                printk(KERN_ERR "%s: unable to allocate PRD table\n",
                        hwif->name);
                return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

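/**
 *      ide_dma_prepare -       prepare a command for DMA submission
 *      @drive: IDE device
 *      @cmd: command
 *
 *      Map the command's scatter/gather list and hand it to the host's
 *      dma_setup method.  Returns 0 when the command is ready to be
 *      issued using DMA, or 1 when the caller should fall back to PIO
 *      (any DMA mapping has already been undone).
 */
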
int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
{
        const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;

        if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
            (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
                goto out;
        ide_map_sg(drive, cmd);
        if (ide_dma_map_sg(drive, cmd) == 0)
                goto out_map;
        if (dma_ops->dma_setup(drive, cmd))
                goto out_dma_unmap;
        drive->waiting_for_dma = 1;
        return 0;
out_dma_unmap:
        ide_dma_unmap_sg(drive, cmd);
out_map:
        ide_map_sg(drive, cmd);
out:
        return 1;
}