/* Source listing: linux/drivers/ata/pata_pxa.c (code-browser header removed) */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Generic PXA PATA driver
   4 *
   5 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
   6 */
   7
   8#include <linux/kernel.h>
   9#include <linux/module.h>
  10#include <linux/blkdev.h>
  11#include <linux/ata.h>
  12#include <linux/libata.h>
  13#include <linux/platform_device.h>
  14#include <linux/dmaengine.h>
  15#include <linux/slab.h>
  16#include <linux/completion.h>
  17
  18#include <scsi/scsi_host.h>
  19
  20#include <linux/platform_data/ata-pxa.h>
  21
  22#define DRV_NAME        "pata_pxa"
  23#define DRV_VERSION     "0.1"
  24
/* Per-port driver state, stored in ata_port->private_data. */
struct pata_pxa_data {
        struct dma_chan         *dma_chan;      /* slave DMA channel for data transfers */
        dma_cookie_t            dma_cookie;     /* cookie of the last submitted descriptor */
        struct completion       dma_done;       /* completed by pxa_ata_dma_irq() */
};
  30
  31/*
  32 * DMA interrupt handler.
  33 */
  34static void pxa_ata_dma_irq(void *d)
  35{
  36        struct pata_pxa_data *pd = d;
  37        enum dma_status status;
  38
  39        status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
  40        if (status == DMA_ERROR || status == DMA_COMPLETE)
  41                complete(&pd->dma_done);
  42}
  43
  44/*
  45 * Prepare taskfile for submission.
  46 */
  47static void pxa_qc_prep(struct ata_queued_cmd *qc)
  48{
  49        struct pata_pxa_data *pd = qc->ap->private_data;
  50        struct dma_async_tx_descriptor *tx;
  51        enum dma_transfer_direction dir;
  52
  53        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  54                return;
  55
  56        dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
  57        tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
  58                                     DMA_PREP_INTERRUPT);
  59        if (!tx) {
  60                ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
  61                return;
  62        }
  63        tx->callback = pxa_ata_dma_irq;
  64        tx->callback_param = pd;
  65        pd->dma_cookie = dmaengine_submit(tx);
  66}
  67
  68/*
  69 * Configure the DMA controller, load the DMA descriptors, but don't start the
  70 * DMA controller yet. Only issue the ATA command.
  71 */
  72static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
  73{
  74        qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
  75}
  76
  77/*
  78 * Execute the DMA transfer.
  79 */
  80static void pxa_bmdma_start(struct ata_queued_cmd *qc)
  81{
  82        struct pata_pxa_data *pd = qc->ap->private_data;
  83        init_completion(&pd->dma_done);
  84        dma_async_issue_pending(pd->dma_chan);
  85}
  86
  87/*
  88 * Wait until the DMA transfer completes, then stop the DMA controller.
  89 */
  90static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
  91{
  92        struct pata_pxa_data *pd = qc->ap->private_data;
  93        enum dma_status status;
  94
  95        status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
  96        if (status != DMA_ERROR && status != DMA_COMPLETE &&
  97            wait_for_completion_timeout(&pd->dma_done, HZ))
  98                ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");
  99
 100        dmaengine_terminate_all(pd->dma_chan);
 101}
 102
 103/*
 104 * Read DMA status. The bmdma_stop() will take care of properly finishing the
 105 * DMA transfer so we always have DMA-complete interrupt here.
 106 */
 107static unsigned char pxa_bmdma_status(struct ata_port *ap)
 108{
 109        struct pata_pxa_data *pd = ap->private_data;
 110        unsigned char ret = ATA_DMA_INTR;
 111        struct dma_tx_state state;
 112        enum dma_status status;
 113
 114        status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
 115        if (status != DMA_COMPLETE)
 116                ret |= ATA_DMA_ERR;
 117
 118        return ret;
 119}
 120
/*
 * No IRQ register present so we do nothing.  libata requires the
 * sff_irq_clear hook to exist, hence this empty stub.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}
 127
/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
 * unclear why ATAPI has DMA issues.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
        /* Non-zero return makes libata fall back to PIO for ATAPI commands. */
        return -EOPNOTSUPP;
}
 136
/* Stock BMDMA SCSI host template; no driver-specific overrides needed. */
static struct scsi_host_template pxa_ata_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};
 140
/*
 * Port operations: inherit generic BMDMA behaviour and override the
 * BMDMA hooks to drive the transfer through the dmaengine API instead
 * of a memory-mapped BMDMA register block.
 */
static struct ata_port_operations pxa_ata_port_ops = {
        .inherits               = &ata_bmdma_port_ops,
        .cable_detect           = ata_cable_40wire,

        .bmdma_setup            = pxa_bmdma_setup,
        .bmdma_start            = pxa_bmdma_start,
        .bmdma_stop             = pxa_bmdma_stop,
        .bmdma_status           = pxa_bmdma_status,

        .check_atapi_dma        = pxa_check_atapi_dma,

        .sff_irq_clear          = pxa_irq_clear,

        .qc_prep                = pxa_qc_prep,
};
 156
 157static int pxa_ata_probe(struct platform_device *pdev)
 158{
 159        struct ata_host *host;
 160        struct ata_port *ap;
 161        struct pata_pxa_data *data;
 162        struct resource *cmd_res;
 163        struct resource *ctl_res;
 164        struct resource *dma_res;
 165        struct resource *irq_res;
 166        struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
 167        struct dma_slave_config config;
 168        int ret = 0;
 169
 170        /*
 171         * Resource validation, three resources are needed:
 172         *  - CMD port base address
 173         *  - CTL port base address
 174         *  - DMA port base address
 175         *  - IRQ pin
 176         */
 177        if (pdev->num_resources != 4) {
 178                dev_err(&pdev->dev, "invalid number of resources\n");
 179                return -EINVAL;
 180        }
 181
 182        /*
 183         * CMD port base address
 184         */
 185        cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 186        if (unlikely(cmd_res == NULL))
 187                return -EINVAL;
 188
 189        /*
 190         * CTL port base address
 191         */
 192        ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 193        if (unlikely(ctl_res == NULL))
 194                return -EINVAL;
 195
 196        /*
 197         * DMA port base address
 198         */
 199        dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 200        if (unlikely(dma_res == NULL))
 201                return -EINVAL;
 202
 203        /*
 204         * IRQ pin
 205         */
 206        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 207        if (unlikely(irq_res == NULL))
 208                return -EINVAL;
 209
 210        /*
 211         * Allocate the host
 212         */
 213        host = ata_host_alloc(&pdev->dev, 1);
 214        if (!host)
 215                return -ENOMEM;
 216
 217        ap              = host->ports[0];
 218        ap->ops         = &pxa_ata_port_ops;
 219        ap->pio_mask    = ATA_PIO4;
 220        ap->mwdma_mask  = ATA_MWDMA2;
 221
 222        ap->ioaddr.cmd_addr     = devm_ioremap(&pdev->dev, cmd_res->start,
 223                                                resource_size(cmd_res));
 224        ap->ioaddr.ctl_addr     = devm_ioremap(&pdev->dev, ctl_res->start,
 225                                                resource_size(ctl_res));
 226        ap->ioaddr.bmdma_addr   = devm_ioremap(&pdev->dev, dma_res->start,
 227                                                resource_size(dma_res));
 228
 229        /*
 230         * Adjust register offsets
 231         */
 232        ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
 233        ap->ioaddr.data_addr    = ap->ioaddr.cmd_addr +
 234                                        (ATA_REG_DATA << pdata->reg_shift);
 235        ap->ioaddr.error_addr   = ap->ioaddr.cmd_addr +
 236                                        (ATA_REG_ERR << pdata->reg_shift);
 237        ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
 238                                        (ATA_REG_FEATURE << pdata->reg_shift);
 239        ap->ioaddr.nsect_addr   = ap->ioaddr.cmd_addr +
 240                                        (ATA_REG_NSECT << pdata->reg_shift);
 241        ap->ioaddr.lbal_addr    = ap->ioaddr.cmd_addr +
 242                                        (ATA_REG_LBAL << pdata->reg_shift);
 243        ap->ioaddr.lbam_addr    = ap->ioaddr.cmd_addr +
 244                                        (ATA_REG_LBAM << pdata->reg_shift);
 245        ap->ioaddr.lbah_addr    = ap->ioaddr.cmd_addr +
 246                                        (ATA_REG_LBAH << pdata->reg_shift);
 247        ap->ioaddr.device_addr  = ap->ioaddr.cmd_addr +
 248                                        (ATA_REG_DEVICE << pdata->reg_shift);
 249        ap->ioaddr.status_addr  = ap->ioaddr.cmd_addr +
 250                                        (ATA_REG_STATUS << pdata->reg_shift);
 251        ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
 252                                        (ATA_REG_CMD << pdata->reg_shift);
 253
 254        /*
 255         * Allocate and load driver's internal data structure
 256         */
 257        data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
 258                                                                GFP_KERNEL);
 259        if (!data)
 260                return -ENOMEM;
 261
 262        ap->private_data = data;
 263
 264        memset(&config, 0, sizeof(config));
 265        config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 266        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 267        config.src_addr = dma_res->start;
 268        config.dst_addr = dma_res->start;
 269        config.src_maxburst = 32;
 270        config.dst_maxburst = 32;
 271
 272        /*
 273         * Request the DMA channel
 274         */
 275        data->dma_chan =
 276                dma_request_slave_channel(&pdev->dev, "data");
 277        if (!data->dma_chan)
 278                return -EBUSY;
 279        ret = dmaengine_slave_config(data->dma_chan, &config);
 280        if (ret < 0) {
 281                dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
 282                return ret;
 283        }
 284
 285        /*
 286         * Activate the ATA host
 287         */
 288        ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
 289                                pdata->irq_flags, &pxa_ata_sht);
 290        if (ret)
 291                dma_release_channel(data->dma_chan);
 292
 293        return ret;
 294}
 295
 296static int pxa_ata_remove(struct platform_device *pdev)
 297{
 298        struct ata_host *host = platform_get_drvdata(pdev);
 299        struct pata_pxa_data *data = host->ports[0]->private_data;
 300
 301        dma_release_channel(data->dma_chan);
 302
 303        ata_host_detach(host);
 304
 305        return 0;
 306}
 307
/* Platform driver glue; matched by name against the "pata_pxa" device. */
static struct platform_driver pxa_ata_driver = {
        .probe          = pxa_ata_probe,
        .remove         = pxa_ata_remove,
        .driver         = {
                .name           = DRV_NAME,
        },
};
 315
/* Module boilerplate: register/unregister helper plus modinfo tags. */
module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
 323