linux/drivers/ata/pata_pxa.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <linux/platform_data/ata-pxa.h>

#define DRV_NAME        "pata_pxa"
#define DRV_VERSION     "0.1"

struct pata_pxa_data {
        struct dma_chan         *dma_chan;      /* dmaengine channel used for data transfers */
        dma_cookie_t            dma_cookie;     /* cookie of the last submitted descriptor */
        struct completion       dma_done;       /* signalled from the DMA completion callback */
};

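/*
 * DMA flow used by this driver:
 *
 *  - pxa_qc_prep() maps the command onto a dmaengine slave descriptor and
 *    submits it; the transfer is not started yet.
 *  - pxa_bmdma_setup() only issues the ATA command to the device.
 *  - pxa_bmdma_start() kicks the dmaengine channel.
 *  - pxa_ata_dma_irq() is the dmaengine completion callback; it completes
 *    'dma_done'.
 *  - pxa_bmdma_stop() waits for 'dma_done' (with a one second timeout) and
 *    then terminates the channel.
 */
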
/*
 * DMA transfer completion callback, invoked by the dmaengine driver once the
 * descriptor has finished or failed.
 */
static void pxa_ata_dma_irq(void *d)
{
        struct pata_pxa_data *pd = d;
        enum dma_status status;

        status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
        if (status == DMA_ERROR || status == DMA_COMPLETE)
                complete(&pd->dma_done);
}

/*
 * Prepare the taskfile for submission: map the scatterlist onto a dmaengine
 * slave descriptor and queue it. The transfer itself is only started later,
 * in pxa_bmdma_start().
 */
static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
        struct pata_pxa_data *pd = qc->ap->private_data;
        struct dma_async_tx_descriptor *tx;
        enum dma_transfer_direction dir;

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return AC_ERR_OK;

        dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
        tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
                                     DMA_PREP_INTERRUPT);
        if (!tx) {
                ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
                return AC_ERR_SYSTEM;
        }
        tx->callback = pxa_ata_dma_irq;
        tx->callback_param = pd;
        pd->dma_cookie = dmaengine_submit(tx);

        return AC_ERR_OK;
}

/*
 * The DMA descriptor was already prepared and submitted in pxa_qc_prep(), so
 * there is nothing to configure here; only issue the ATA command. The DMA
 * engine itself is kicked in pxa_bmdma_start().
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
        qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
        struct pata_pxa_data *pd = qc->ap->private_data;

        init_completion(&pd->dma_done);
        dma_async_issue_pending(pd->dma_chan);
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct pata_pxa_data *pd = qc->ap->private_data;
        enum dma_status status;

        status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
        if (status != DMA_ERROR && status != DMA_COMPLETE &&
            !wait_for_completion_timeout(&pd->dma_done, HZ))
                ata_dev_err(qc->dev, "Timeout waiting for DMA completion!\n");

        dmaengine_terminate_all(pd->dma_chan);
}

/*
 * Read the DMA status. bmdma_stop() takes care of properly finishing the
 * DMA transfer, so a DMA-complete interrupt is always expected here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
        struct pata_pxa_data *pd = ap->private_data;
        unsigned char ret = ATA_DMA_INTR;
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
        if (status != DMA_COMPLETE)
                ret |= ATA_DMA_ERR;

        return ret;
}

/*
 * No IRQ register is present, so there is nothing to clear.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver; it is still
 * unclear why ATAPI has DMA issues here.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
        return -EOPNOTSUPP;
}

static struct scsi_host_template pxa_ata_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
        .inherits               = &ata_bmdma_port_ops,
        .cable_detect           = ata_cable_40wire,

        .bmdma_setup            = pxa_bmdma_setup,
        .bmdma_start            = pxa_bmdma_start,
        .bmdma_stop             = pxa_bmdma_stop,
        .bmdma_status           = pxa_bmdma_status,

        .check_atapi_dma        = pxa_check_atapi_dma,

        .sff_irq_clear          = pxa_irq_clear,

        .qc_prep                = pxa_qc_prep,
};

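/*
 * Bind the driver to a platform device. The device is expected to provide
 * four resources (the CMD and CTL MMIO regions, the DMA port base address and
 * the IRQ line), a "data" dmaengine slave channel, and pata_pxa_pdata
 * supplying the register spacing (reg_shift) and IRQ flags.
 */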
static int pxa_ata_probe(struct platform_device *pdev)
{
        struct ata_host *host;
        struct ata_port *ap;
        struct pata_pxa_data *data;
        struct resource *cmd_res;
        struct resource *ctl_res;
        struct resource *dma_res;
        struct resource *irq_res;
        struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
        struct dma_slave_config config;
        int ret = 0;

        /*
         * Resource validation, four resources are needed:
         *  - CMD port base address
         *  - CTL port base address
         *  - DMA port base address
         *  - IRQ pin
         */
        if (pdev->num_resources != 4) {
                dev_err(&pdev->dev, "invalid number of resources\n");
                return -EINVAL;
        }

 184        /*
 185         * CMD port base address
 186         */
 187        cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 188        if (unlikely(cmd_res == NULL))
 189                return -EINVAL;
 190
 191        /*
 192         * CTL port base address
 193         */
 194        ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 195        if (unlikely(ctl_res == NULL))
 196                return -EINVAL;
 197
 198        /*
 199         * DMA port base address
 200         */
 201        dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 202        if (unlikely(dma_res == NULL))
 203                return -EINVAL;
 204
 205        /*
 206         * IRQ pin
 207         */
 208        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 209        if (unlikely(irq_res == NULL))
 210                return -EINVAL;
 211
 212        /*
 213         * Allocate the host
 214         */
 215        host = ata_host_alloc(&pdev->dev, 1);
 216        if (!host)
 217                return -ENOMEM;
 218
 219        ap              = host->ports[0];
 220        ap->ops         = &pxa_ata_port_ops;
 221        ap->pio_mask    = ATA_PIO4;
 222        ap->mwdma_mask  = ATA_MWDMA2;
 223
 224        ap->ioaddr.cmd_addr     = devm_ioremap(&pdev->dev, cmd_res->start,
 225                                                resource_size(cmd_res));
 226        ap->ioaddr.ctl_addr     = devm_ioremap(&pdev->dev, ctl_res->start,
 227                                                resource_size(ctl_res));
 228        ap->ioaddr.bmdma_addr   = devm_ioremap(&pdev->dev, dma_res->start,
 229                                                resource_size(dma_res));
 230
 231        /*
 232         * Adjust register offsets
 233         */
 234        ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
 235        ap->ioaddr.data_addr    = ap->ioaddr.cmd_addr +
 236                                        (ATA_REG_DATA << pdata->reg_shift);
 237        ap->ioaddr.error_addr   = ap->ioaddr.cmd_addr +
 238                                        (ATA_REG_ERR << pdata->reg_shift);
 239        ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
 240                                        (ATA_REG_FEATURE << pdata->reg_shift);
 241        ap->ioaddr.nsect_addr   = ap->ioaddr.cmd_addr +
 242                                        (ATA_REG_NSECT << pdata->reg_shift);
 243        ap->ioaddr.lbal_addr    = ap->ioaddr.cmd_addr +
 244                                        (ATA_REG_LBAL << pdata->reg_shift);
 245        ap->ioaddr.lbam_addr    = ap->ioaddr.cmd_addr +
 246                                        (ATA_REG_LBAM << pdata->reg_shift);
 247        ap->ioaddr.lbah_addr    = ap->ioaddr.cmd_addr +
 248                                        (ATA_REG_LBAH << pdata->reg_shift);
 249        ap->ioaddr.device_addr  = ap->ioaddr.cmd_addr +
 250                                        (ATA_REG_DEVICE << pdata->reg_shift);
 251        ap->ioaddr.status_addr  = ap->ioaddr.cmd_addr +
 252                                        (ATA_REG_STATUS << pdata->reg_shift);
 253        ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
 254                                        (ATA_REG_CMD << pdata->reg_shift);
 255
 256        /*
 257         * Allocate and load driver's internal data structure
 258         */
 259        data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
 260                                                                GFP_KERNEL);
 261        if (!data)
 262                return -ENOMEM;
 263
 264        ap->private_data = data;
 265
 266        memset(&config, 0, sizeof(config));
 267        config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 268        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 269        config.src_addr = dma_res->start;
 270        config.dst_addr = dma_res->start;
 271        config.src_maxburst = 32;
 272        config.dst_maxburst = 32;
 273
 274        /*
 275         * Request the DMA channel
 276         */
 277        data->dma_chan =
 278                dma_request_slave_channel(&pdev->dev, "data");
 279        if (!data->dma_chan)
 280                return -EBUSY;
 281        ret = dmaengine_slave_config(data->dma_chan, &config);
 282        if (ret < 0) {
 283                dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
 284                return ret;
 285        }
 286
 287        /*
 288         * Activate the ATA host
 289         */
 290        ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
 291                                pdata->irq_flags, &pxa_ata_sht);
 292        if (ret)
 293                dma_release_channel(data->dma_chan);
 294
 295        return ret;
 296}
 297
 298static int pxa_ata_remove(struct platform_device *pdev)
 299{
 300        struct ata_host *host = platform_get_drvdata(pdev);
 301        struct pata_pxa_data *data = host->ports[0]->private_data;
 302
 303        dma_release_channel(data->dma_chan);
 304
 305        ata_host_detach(host);
 306
 307        return 0;
 308}
 309
static struct platform_driver pxa_ata_driver = {
        .probe          = pxa_ata_probe,
        .remove         = pxa_ata_remove,
        .driver         = {
                .name           = DRV_NAME,
        },
};

module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);