linux/drivers/ide/ide-dma.c
/*
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 *  Special thanks to Mark for his six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static const struct drive_list_entry drive_whitelist[] = {
        { "Micropolis 2112A",                   NULL            },
        { "CONNER CTMA 4000",                   NULL            },
        { "CONNER CTT8000-A",                   NULL            },
        { "ST34342A",                           NULL            },
        { NULL,                                 NULL            }
};

static const struct drive_list_entry drive_blacklist[] = {
        { "WDC AC11000H",                       NULL            },
        { "WDC AC22100H",                       NULL            },
        { "WDC AC32500H",                       NULL            },
        { "WDC AC33100H",                       NULL            },
        { "WDC AC31600H",                       NULL            },
        { "WDC AC32100H",                       "24.09P07"      },
        { "WDC AC23200L",                       "21.10N21"      },
        { "Compaq CRD-8241B",                   NULL            },
        { "CRD-8400B",                          NULL            },
        { "CRD-8480B",                          NULL            },
        { "CRD-8482B",                          NULL            },
        { "CRD-84",                             NULL            },
        { "SanDisk SDP3B",                      NULL            },
        { "SanDisk SDP3B-64",                   NULL            },
        { "SANYO CD-ROM CRD",                   NULL            },
        { "HITACHI CDR-8",                      NULL            },
        { "HITACHI CDR-8335",                   NULL            },
        { "HITACHI CDR-8435",                   NULL            },
        { "Toshiba CD-ROM XM-6202B",            NULL            },
        { "TOSHIBA CD-ROM XM-1702BC",           NULL            },
        { "CD-532E-A",                          NULL            },
        { "E-IDE CD-ROM CR-840",                NULL            },
        { "CD-ROM Drive/F5A",                   NULL            },
        { "WPI CDD-820",                        NULL            },
        { "SAMSUNG CD-ROM SC-148C",             NULL            },
        { "SAMSUNG CD-ROM SC",                  NULL            },
        { "ATAPI CD-ROM DRIVE 40X MAXIMUM",     NULL            },
        { "_NEC DV5800A",                       NULL            },
        { "SAMSUNG CD-ROM SN-124",              "N001"          },
        { "Seagate STT20000A",                  NULL            },
        { "CD-ROM CDR_U200",                    "1.09"          },
        { NULL,                                 NULL            }
};
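
/*
 * Note on matching: ide_in_drive_list() (implemented elsewhere in the IDE
 * core) compares the ATA model string exactly and, when the second field
 * is non-NULL, also requires it to occur in the drive's firmware revision
 * string.  So "WDC AC32100H" is blacklisted only with firmware "24.09P07",
 * while a NULL second field blacklists every revision of that model.
 */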

/**
 *      ide_dma_intr    -       IDE DMA interrupt handler
 *      @drive: the drive the interrupt is for
 *
 *      Handle an interrupt completing a read/write DMA transfer on an
 *      IDE device.
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        u8 stat = 0, dma_stat = 0;

        drive->waiting_for_dma = 0;
        dma_stat = hwif->dma_ops->dma_end(drive);
        ide_dma_unmap_sg(drive, cmd);
        stat = hwif->tp_ops->read_status(hwif);

        if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
                if (!dma_stat) {
                        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                                ide_finish_cmd(drive, cmd, stat);
                        else
                                ide_complete_rq(drive, BLK_STS_OK,
                                                blk_rq_sectors(cmd->rq) << 9);
                        return ide_stopped;
                }
                printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
                        drive->name, __func__, dma_stat);
        }
        return ide_error(drive, "dma_intr", stat);
}
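
/*
 * For reference: OK_STAT(stat, good, bad) is defined in <linux/ide.h> as
 * ((stat & (good | bad)) == good), i.e. all "good" bits set and all "bad"
 * bits clear.  Above, the drive must report DRIVE_READY with neither a
 * write-fault bit from drive->bad_wstat nor ATA_DRQ still asserted before
 * the transfer is considered complete.
 */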

int ide_dma_good_drive(ide_drive_t *drive)
{
        return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *      ide_dma_map_sg  -       map IDE scatter gather for DMA I/O
 *      @drive: the drive to map the DMA table for
 *      @cmd: command
 *
 *      Perform the DMA mapping magic necessary to access the source or
 *      target buffers of a request via DMA.  The lower layers of the
 *      kernel provide the necessary cache management so that we can
 *      operate in a portable fashion.
 */

static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        int i;

        if (cmd->tf_flags & IDE_TFLAG_WRITE)
                cmd->sg_dma_direction = DMA_TO_DEVICE;
        else
                cmd->sg_dma_direction = DMA_FROM_DEVICE;

        i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
        if (i) {
                cmd->orig_sg_nents = cmd->sg_nents;
                cmd->sg_nents = i;
        }

        return i;
}
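
/*
 * dma_map_sg() may legitimately return fewer entries than it was handed
 * when an IOMMU coalesces adjacent segments (or 0 on failure).  The
 * original count is stashed in cmd->orig_sg_nents because the DMA API
 * requires dma_unmap_sg() to be called with the count that was passed to
 * dma_map_sg(), not the count it returned.
 */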

/**
 *      ide_dma_unmap_sg        -       clean up DMA mapping
 *      @drive: the drive to unmap
 *      @cmd: command whose scatter/gather mapping is torn down
 *
 *      Tear down mappings after DMA has completed. This must be called
 *      after the completion of each use of ide_build_dmatable and before
 *      the next use of ide_build_dmatable. Failure to do so will cause
 *      an oops as only one mapping can be live for each target at a given
 *      time.
 */

void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
        ide_hwif_t *hwif = drive->hwif;

        dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
                     cmd->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);

/**
 *      ide_dma_off_quietly     -       Generic DMA kill
 *      @drive: drive to control
 *
 *      Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
        drive->dev_flags &= ~IDE_DFLAG_USING_DMA;

        drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *      ide_dma_off     -       disable DMA on a device
 *      @drive: drive to disable DMA on
 *
 *      Disable IDE DMA for a device on this IDE controller.
 *      Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
        printk(KERN_INFO "%s: DMA disabled\n", drive->name);
        ide_dma_off_quietly(drive);
}
EXPORT_SYMBOL(ide_dma_off);

/**
 *      ide_dma_on              -       Enable DMA on a device
 *      @drive: drive to enable DMA on
 *
 *      Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
        drive->dev_flags |= IDE_DFLAG_USING_DMA;

        drive->hwif->dma_ops->dma_host_set(drive, 1);
}

int __ide_dma_bad_drive(ide_drive_t *drive)
{
        u16 *id = drive->id;

        int blacklist = ide_in_drive_list(id, drive_blacklist);
        if (blacklist) {
                printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
                                    drive->name, (char *)&id[ATA_ID_PROD]);
                return blacklist;
        }
        return 0;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
        XFER_UDMA_0,
        XFER_MW_DMA_0,
        XFER_SW_DMA_0,
};
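
/*
 * The bases above are ordered fastest class first.  The XFER_* constants
 * grow with speed (XFER_SW_DMA_0 < XFER_MW_DMA_0 < XFER_UDMA_0), so
 * ide_find_dma_mode() below can skip any class whose base exceeds the
 * requested mode and stop at the first class that yields a usable mask.
 */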

static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
        u16 *id = drive->id;
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        unsigned int mask = 0;

        switch (base) {
        case XFER_UDMA_0:
                if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
                        break;
                mask = id[ATA_ID_UDMA_MODES];
                if (port_ops && port_ops->udma_filter)
                        mask &= port_ops->udma_filter(drive);
                else
                        mask &= hwif->ultra_mask;

                /*
                 * avoid false cable warning from eighty_ninty_three()
                 */
                if (req_mode > XFER_UDMA_2) {
                        if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
                                mask &= 0x07;
                }
                break;
        case XFER_MW_DMA_0:
                mask = id[ATA_ID_MWDMA_MODES];

                /* Also look for the CF specific MWDMA modes... */
                if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) {
                        u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1;

                        mask |= ((2 << mode) - 1) << 3;
                }

                if (port_ops && port_ops->mdma_filter)
                        mask &= port_ops->mdma_filter(drive);
                else
                        mask &= hwif->mwdma_mask;
                break;
        case XFER_SW_DMA_0:
                mask = id[ATA_ID_SWDMA_MODES];
                if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) {
                        u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

                        /*
                         * if the mode is valid convert it to the mask
                         * (the maximum allowed mode is XFER_SW_DMA_2)
                         */
                        if (mode <= 2)
                                mask = (2 << mode) - 1;
                }
                mask &= hwif->swdma_mask;
                break;
        default:
                BUG();
                break;
        }

        return mask;
}
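
/*
 * Worked example (illustrative values): a drive reporting
 * id[ATA_ID_UDMA_MODES] = 0x3f claims UDMA0-5.  With hwif->ultra_mask =
 * 0x1f (host capped at UDMA4) the mask becomes 0x1f.  If a mode above
 * UDMA2 was requested but the 80-wire cable test fails, "mask &= 0x07"
 * clears bits 3-6 (UDMA3-6), limiting the drive to UDMA2.  In the CF
 * branch, ((2 << mode) - 1) << 3 sets bits 3..(3 + mode), i.e. MWDMA3 up
 * to the advertised CFA advanced mode.
 */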

/**
 *      ide_find_dma_mode       -       compute DMA speed
 *      @drive: IDE device
 *      @req_mode: requested mode
 *
 *      Checks the drive/host capabilities and finds the speed to use for
 *      the DMA transfer.  The speed is then limited by the requested mode.
 *
 *      Returns 0 if the drive/host combination is incapable of DMA transfers
 *      or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
        ide_hwif_t *hwif = drive->hwif;
        unsigned int mask;
        int x, i;
        u8 mode = 0;

        if (drive->media != ide_disk) {
                if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
                        return 0;
        }

        for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
                if (req_mode < xfer_mode_bases[i])
                        continue;
                mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
                x = fls(mask) - 1;
                if (x >= 0) {
                        mode = xfer_mode_bases[i] + x;
                        break;
                }
        }

        if (hwif->chipset == ide_acorn && mode == 0) {
                /*
                 * is this correct?
                 */
                if (ide_dma_good_drive(drive) &&
                    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
                        mode = XFER_MW_DMA_1;
        }

        mode = min(mode, req_mode);

        printk(KERN_INFO "%s: %s mode selected\n", drive->name,
                          mode ? ide_xfer_verbose(mode) : "no DMA");

        return mode;
}
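
/*
 * Worked example (illustrative values): with mask = 0x1f from
 * ide_get_mode_mask(), fls(0x1f) = 5, so x = 4 and the result is
 * XFER_UDMA_0 + 4 (UDMA4), the fastest mode both sides advertise.  For an
 * empty mask fls() returns 0, x becomes -1, and the loop falls through to
 * the next, slower transfer class.
 */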

static int ide_tune_dma(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 speed;

        if (ata_id_has_dma(drive->id) == 0 ||
            (drive->dev_flags & IDE_DFLAG_NODMA))
                return 0;

        /* consult the list of known "bad" drives */
        if (__ide_dma_bad_drive(drive))
                return 0;

        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return config_drive_for_dma(drive);

        speed = ide_max_dma_mode(drive);

        if (!speed)
                return 0;

        if (ide_set_dma_mode(drive, speed))
                return 0;

        return 1;
}

static int ide_dma_check(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;

        if (ide_tune_dma(drive))
                return 0;

        /* TODO: always do PIO fallback */
        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return -1;

        ide_set_max_pio(drive);

        return -1;
}

int ide_set_dma(ide_drive_t *drive)
{
        int rc;

        /*
         * Force DMA off for the duration of the check.  Some chipsets
         * appear to do interesting things if this is not checked and
         * cleared first.
         *   PARANOIA!!!
         */
        ide_dma_off_quietly(drive);

        rc = ide_dma_check(drive);
        if (rc)
                return rc;

        ide_dma_on(drive);

        return 0;
}

void ide_check_dma_crc(ide_drive_t *drive)
{
        u8 mode;

        ide_dma_off_quietly(drive);
        drive->crc_count = 0;
        mode = drive->current_speed;
        /*
         * Don't try non Ultra-DMA modes without iCRC's.  Force the
         * device to PIO and make the user enable SWDMA/MWDMA modes.
         */
        if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
                mode--;
        else
                mode = XFER_PIO_4;
        ide_set_xfer_rate(drive, mode);
        if (drive->current_speed >= XFER_SW_DMA_0)
                ide_dma_on(drive);
}
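
/*
 * Worked example: a drive accumulating CRC errors at XFER_UDMA_5 is
 * stepped down one notch to XFER_UDMA_4.  A drive already at XFER_UDMA_0,
 * or in a non-UDMA mode (which has no per-burst CRC), is forced to
 * XFER_PIO_4 instead.  DMA is re-enabled only if the rate actually in
 * effect afterwards is still a DMA mode.
 */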

void ide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);

/*
 * Un-busy the port, etc., and clear any pending DMA status.  We want to
 * retry the current request in PIO mode instead of risking tossing it
 * all away.
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;
        struct ide_cmd *cmd = &hwif->cmd;
        ide_startstop_t ret = ide_stopped;

        /*
         * End the current DMA transaction.
         */

        if (error < 0) {
                printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
                drive->waiting_for_dma = 0;
                (void)dma_ops->dma_end(drive);
                ide_dma_unmap_sg(drive, cmd);
                ret = ide_error(drive, "dma timeout error",
                                hwif->tp_ops->read_status(hwif));
        } else {
                printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
                if (dma_ops->dma_clear)
                        dma_ops->dma_clear(drive);
                printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
                if (dma_ops->dma_test_irq(drive) == 0) {
                        ide_dump_status(drive, "DMA timeout",
                                        hwif->tp_ops->read_status(hwif));
                        drive->waiting_for_dma = 0;
                        (void)dma_ops->dma_end(drive);
                        ide_dma_unmap_sg(drive, cmd);
                }
        }

        /*
         * Disable DMA for now, but remember that we did so because of
         * a timeout -- we'll reenable after we finish this next request
         * (or rather the first chunk of it) in PIO.
         */
        drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
        drive->retry_pio++;
        ide_dma_off_quietly(drive);

        /*
         * Make sure the request is sane.
         */
        if (hwif->rq)
                scsi_req(hwif->rq)->result = 0;
        return ret;
}

void ide_release_dma_engine(ide_hwif_t *hwif)
{
        if (hwif->dmatable_cpu) {
                int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

                dma_free_coherent(hwif->dev, prd_size,
                                  hwif->dmatable_cpu, hwif->dmatable_dma);
                hwif->dmatable_cpu = NULL;
        }
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
        int prd_size;

        if (hwif->prd_max_nents == 0)
                hwif->prd_max_nents = PRD_ENTRIES;
        if (hwif->prd_ent_size == 0)
                hwif->prd_ent_size = PRD_BYTES;

        prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
                                                &hwif->dmatable_dma,
                                                GFP_ATOMIC);
        if (hwif->dmatable_cpu == NULL) {
                printk(KERN_ERR "%s: unable to allocate PRD table\n",
                        hwif->name);
                return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
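
/*
 * Sizing note (assuming the defaults from <linux/ide.h>, PRD_ENTRIES = 256
 * and PRD_BYTES = 8): prd_size comes to 2 KiB per port.  The table must be
 * coherent because the bus-master engine fetches descriptors directly from
 * it; hosts with a different descriptor format set prd_max_nents and
 * prd_ent_size before calling this function.
 */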

int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
{
        const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;

        if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
            (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
                goto out;
        ide_map_sg(drive, cmd);
        if (ide_dma_map_sg(drive, cmd) == 0)
                goto out_map;
        if (dma_ops->dma_setup(drive, cmd))
                goto out_dma_unmap;
        drive->waiting_for_dma = 1;
        return 0;
out_dma_unmap:
        ide_dma_unmap_sg(drive, cmd);
out_map:
        ide_map_sg(drive, cmd);
out:
        return 1;
}
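
#if 0
/*
 * Usage sketch (illustrative only, not built): how a caller might pair
 * ide_dma_prepare() with starting the engine.  example_issue_cmd() and
 * do_pio_transfer() are hypothetical; dma_start is the real hook in
 * struct ide_dma_ops.  On a nonzero return from ide_dma_prepare() the
 * scatterlist has already been rebuilt for PIO by the out_map path above.
 */
static ide_startstop_t example_issue_cmd(ide_drive_t *drive,
                                         struct ide_cmd *cmd)
{
        if (ide_dma_prepare(drive, cmd) == 0) {
                /* mapping and controller setup succeeded: start DMA */
                drive->hwif->dma_ops->dma_start(drive);
                return ide_started;
        }

        /* fall back to issuing this request in PIO mode */
        return do_pio_transfer(drive, cmd);     /* hypothetical PIO path */
}
#endif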