linux/drivers/spi/spi-pxa2xx-dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

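/*
 * Finish the current DMA transfer. This runs from both the DMA completion
 * callback and the ROR (receiver overrun) interrupt path; ->dma_running
 * ensures only one of the two actually completes the transfer. On error
 * the SSP is disabled and the message fails with -EIO.
 */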
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                                             bool error)
{
        struct spi_message *msg = drv_data->controller->cur_msg;

        /*
         * It is possible that one CPU is handling the ROR interrupt while
         * another gets the DMA completion. Completing the same transfer
         * twice leads to problems, thus we prevent concurrent calls by
         * using ->dma_running.
         */
        if (atomic_dec_and_test(&drv_data->dma_running)) {
                /*
                 * If the other CPU is still handling the ROR interrupt we
                 * might not know about the error yet. So we re-check the
                 * ROR bit here before we clear the status register.
                 */
                if (!error) {
                        u32 status = pxa2xx_spi_read(drv_data, SSSR)
                                     & drv_data->mask_sr;
                        error = status & SSSR_ROR;
                }

                /* Clear status & disable interrupts */
                pxa2xx_spi_write(drv_data, SSCR1,
                                 pxa2xx_spi_read(drv_data, SSCR1)
                                 & ~drv_data->dma_cr1);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
                if (!pxa25x_ssp_comp(drv_data))
                        pxa2xx_spi_write(drv_data, SSTO, 0);

                if (error) {
                        /* In case we got an error we disable the SSP now */
                        pxa2xx_spi_write(drv_data, SSCR0,
                                         pxa2xx_spi_read(drv_data, SSCR0)
                                         & ~SSCR0_SSE);
                        msg->status = -EIO;
                }

                spi_finalize_current_transfer(drv_data->controller);
        }
}

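/*
 * DMA engine completion callback. It is attached to the RX descriptor
 * in pxa2xx_spi_dma_prepare() because the transfer is only done once
 * the last word has been clocked into the RX FIFO and moved to memory.
 */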
static void pxa2xx_spi_dma_callback(void *data)
{
        pxa2xx_spi_dma_transfer_complete(data, false);
}

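/*
 * Prepare one DMA descriptor for @dir. The slave config derives the bus
 * width from the per-transfer word size (drv_data->n_bytes), points the
 * device-side address at the SSP data register (SSDR) and uses the
 * per-chip DMA burst size. Returns NULL if the slave config fails.
 */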
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
                           enum dma_transfer_direction dir,
                           struct spi_transfer *xfer)
{
        struct chip_data *chip =
                spi_get_ctldata(drv_data->controller->cur_msg->spi);
        enum dma_slave_buswidth width;
        struct dma_slave_config cfg;
        struct dma_chan *chan;
        struct sg_table *sgt;
        int ret;

        switch (drv_data->n_bytes) {
        case 1:
                width = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        case 2:
                width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        default:
                width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction = dir;

        if (dir == DMA_MEM_TO_DEV) {
                cfg.dst_addr = drv_data->ssdr_physical;
                cfg.dst_addr_width = width;
                cfg.dst_maxburst = chip->dma_burst_size;

                sgt = &xfer->tx_sg;
                chan = drv_data->controller->dma_tx;
        } else {
                cfg.src_addr = drv_data->ssdr_physical;
                cfg.src_addr_width = width;
                cfg.src_maxburst = chip->dma_burst_size;

                sgt = &xfer->rx_sg;
                chan = drv_data->controller->dma_rx;
        }

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
                return NULL;
        }

        return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

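/*
 * Interrupt handler for DMA-mode transfers. Only the receiver overrun
 * (ROR) condition matters here: on overrun both DMA channels are torn
 * down and the transfer is completed with an error. Anything else is
 * left to other handlers, hence IRQ_NONE.
 */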
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
        u32 status;

        status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
        if (status & SSSR_ROR) {
                dev_err(&drv_data->pdev->dev, "FIFO overrun\n");

                dmaengine_terminate_async(drv_data->controller->dma_rx);
                dmaengine_terminate_async(drv_data->controller->dma_tx);

                pxa2xx_spi_dma_transfer_complete(drv_data, true);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

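/*
 * Prepare and submit DMA descriptors for both directions of @xfer. SPI
 * clocks data in both directions at once, so even a pure read needs a
 * TX descriptor to drive the bus. Both descriptors are only submitted
 * here; nothing moves until pxa2xx_spi_dma_start() issues them.
 */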
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
                           struct spi_transfer *xfer)
{
        struct dma_async_tx_descriptor *tx_desc, *rx_desc;
        int err;

        tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
        if (!tx_desc) {
                dev_err(&drv_data->pdev->dev,
                        "failed to get DMA TX descriptor\n");
                err = -EBUSY;
                goto err_tx;
        }

        rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
        if (!rx_desc) {
                dev_err(&drv_data->pdev->dev,
                        "failed to get DMA RX descriptor\n");
                err = -EBUSY;
                goto err_rx;
        }

        /* We are ready when RX completes */
        rx_desc->callback = pxa2xx_spi_dma_callback;
        rx_desc->callback_param = drv_data;

        dmaengine_submit(rx_desc);
        dmaengine_submit(tx_desc);
        return 0;

err_rx:
        dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
        return err;
}

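/*
 * Kick off the previously submitted descriptors and mark the DMA as
 * running. RX is issued first, presumably so the receive channel is
 * armed before TX starts pushing data into the FIFO.
 */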
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
        dma_async_issue_pending(drv_data->controller->dma_rx);
        dma_async_issue_pending(drv_data->controller->dma_tx);

        atomic_set(&drv_data->dma_running, 1);
}

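/*
 * Abort an in-flight DMA transfer. Clearing ->dma_running first keeps a
 * late completion callback from finalizing the transfer; the synchronous
 * terminate then ensures both callbacks have finished before returning.
 */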
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
        atomic_set(&drv_data->dma_running, 0);
        dmaengine_terminate_sync(drv_data->controller->dma_rx);
        dmaengine_terminate_sync(drv_data->controller->dma_tx);
}

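/*
 * Request the TX and RX DMA channels at probe time. The _compat variant
 * first tries the named "tx"/"rx" channels (DT/ACPI) and falls back to
 * the legacy filter function and parameters from the platform data.
 */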
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
        struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
        struct device *dev = &drv_data->pdev->dev;
        struct spi_controller *controller = drv_data->controller;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        controller->dma_tx = dma_request_slave_channel_compat(mask,
                                pdata->dma_filter, pdata->tx_param, dev, "tx");
        if (!controller->dma_tx)
                return -ENODEV;

        controller->dma_rx = dma_request_slave_channel_compat(mask,
                                pdata->dma_filter, pdata->rx_param, dev, "rx");
        if (!controller->dma_rx) {
                dma_release_channel(controller->dma_tx);
                controller->dma_tx = NULL;
                return -ENODEV;
        }

        return 0;
}

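/*
 * Counterpart of pxa2xx_spi_dma_setup(): terminate any outstanding
 * transfers and hand both channels back to the DMA engine core.
 */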
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
        struct spi_controller *controller = drv_data->controller;

        if (controller->dma_rx) {
                dmaengine_terminate_sync(controller->dma_rx);
                dma_release_channel(controller->dma_rx);
                controller->dma_rx = NULL;
        }
        if (controller->dma_tx) {
                dmaengine_terminate_sync(controller->dma_tx);
                dma_release_channel(controller->dma_tx);
                controller->dma_tx = NULL;
        }
}

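/*
 * Pick the DMA burst size and the SSCR1 RX/TX FIFO threshold bits for
 * @spi. A per-device burst size from the legacy chip_info overrides the
 * controller-wide default; the FIFO thresholds are always the defaults.
 */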
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
                                           struct spi_device *spi,
                                           u8 bits_per_word, u32 *burst_code,
                                           u32 *threshold)
{
        struct pxa2xx_spi_chip *chip_info = spi->controller_data;
        struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
        u32 dma_burst_size = drv_data->controller_info->dma_burst_size;

        /*
         * If the DMA burst size is given in chip_info we use that,
         * otherwise we use the default. Also we use the default FIFO
         * thresholds for now.
         */
        *burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
        *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
                   | SSCR1_TxTresh(TX_THRESH_DFLT);

        return 0;
}