linux/drivers/spi/spi-pxa2xx-dma.c
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

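/*
 * Build a scatter/gather table covering the transfer buffer in 2 KB chunks
 * (pointing each chunk at the preallocated dummy buffer when there is no
 * real buffer for this direction) and map it for DMA. Returns the number of
 * mapped entries on success or a negative error code on failure.
 */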
static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
				     enum dma_data_direction dir)
{
	int i, nents, len = drv_data->len;
	struct scatterlist *sg;
	struct device *dmadev;
	struct sg_table *sgt;
	void *buf, *pbuf;

	if (dir == DMA_TO_DEVICE) {
		dmadev = drv_data->tx_chan->device->dev;
		sgt = &drv_data->tx_sgt;
		buf = drv_data->tx;
		drv_data->tx_map_len = len;
	} else {
		dmadev = drv_data->rx_chan->device->dev;
		sgt = &drv_data->rx_sgt;
		buf = drv_data->rx;
		drv_data->rx_map_len = len;
	}

	nents = DIV_ROUND_UP(len, SZ_2K);
	if (nents != sgt->nents) {
		int ret;

		sg_free_table(sgt);
		ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
		if (ret)
			return ret;
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, SZ_2K);

		if (buf)
			sg_set_buf(sg, pbuf, bytes);
		else
			sg_set_buf(sg, drv_data->dummy, bytes);

		pbuf += bytes;
		len -= bytes;
	}

	nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return -ENOMEM;

	return nents;
}

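/* Unmap the scatter/gather table previously mapped for the given direction */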
static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
					enum dma_data_direction dir)
{
	struct device *dmadev;
	struct sg_table *sgt;

	if (dir == DMA_TO_DEVICE) {
		dmadev = drv_data->tx_chan->device->dev;
		sgt = &drv_data->tx_sgt;
	} else {
		dmadev = drv_data->rx_chan->device->dev;
		sgt = &drv_data->rx_sgt;
	}

	dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
}

static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	if (!drv_data->dma_mapped)
		return;

	pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
	pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);

	drv_data->dma_mapped = 0;
}

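/*
 * Complete the current DMA transfer. May be called from both the DMA
 * completion callback and the ROR interrupt handler; ->dma_running
 * guarantees the completion work below runs only once per transfer.
 */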
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
					     bool error)
{
	struct spi_message *msg = drv_data->cur_msg;

	/*
	 * It is possible that one CPU is handling the ROR interrupt while
	 * another one just gets the DMA completion. Calling pump_transfers()
	 * twice for the same transfer leads to problems, thus we prevent
	 * concurrent calls by using ->dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error) {
			u32 status = pxa2xx_spi_read(drv_data, SSSR)
				     & drv_data->mask_sr;
			error = status & SSSR_ROR;
		}

		/* Clear status & disable interrupts */
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->dma_cr1);
		write_SSSR_CS(drv_data, drv_data->clear_sr);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		if (!error) {
			pxa2xx_spi_unmap_dma_buffers(drv_data);

			drv_data->tx += drv_data->tx_map_len;
			drv_data->rx += drv_data->rx_map_len;

			msg->actual_length += drv_data->len;
			msg->state = pxa2xx_spi_next_transfer(drv_data);
		} else {
			/* In case we got an error, we disable the SSP now */
			pxa2xx_spi_write(drv_data, SSCR0,
					 pxa2xx_spi_read(drv_data, SSCR0)
					 & ~SSCR0_SSE);

			msg->state = ERROR_STATE;
		}

		tasklet_schedule(&drv_data->pump_transfers);
	}
}

static void pxa2xx_spi_dma_callback(void *data)
{
	pxa2xx_spi_dma_transfer_complete(data, false);
}

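/*
 * Configure the DMA channel for one direction (bus width, FIFO address and
 * burst size) and prepare a slave sg descriptor for it. Returns the
 * descriptor, or NULL if the slave configuration or preparation fails.
 */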
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
			   enum dma_transfer_direction dir)
{
	struct chip_data *chip = drv_data->cur_chip;
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int nents, ret;

	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssdr_physical;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = chip->dma_burst_size;

		sgt = &drv_data->tx_sgt;
		nents = drv_data->tx_nents;
		chan = drv_data->tx_chan;
	} else {
		cfg.src_addr = drv_data->ssdr_physical;
		cfg.src_addr_width = width;
		cfg.src_maxburst = chip->dma_burst_size;

		sgt = &drv_data->rx_sgt;
		nents = drv_data->rx_nents;
		chan = drv_data->rx_chan;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

bool pxa2xx_spi_dma_is_possible(size_t len)
{
	return len <= MAX_DMA_LEN;
}

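/*
 * Map the TX and RX buffers for DMA. Returns 1 when both directions were
 * mapped successfully and 0 when the transfer should fall back to PIO
 * (DMA disabled, transfer shorter than one burst, or mapping failed).
 */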
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	const struct chip_data *chip = drv_data->cur_chip;
	int ret;

	if (!chip->enable_dma)
		return 0;

	/* Don't bother with DMA if we can't do even a single burst */
	if (drv_data->len < chip->dma_burst_size)
		return 0;

	ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
	if (ret <= 0) {
		dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
		return 0;
	}

	drv_data->tx_nents = ret;

	ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
	if (ret <= 0) {
		pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
		dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
		return 0;
	}

	drv_data->rx_nents = ret;
	return 1;
}

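/*
 * Handle SSP interrupts raised while a DMA transfer is in flight. A receive
 * FIFO overrun aborts both DMA channels and completes the transfer with an
 * error; any other interrupt source is left to the caller.
 */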
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 status;

	status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
	if (status & SSSR_ROR) {
		dev_err(&drv_data->pdev->dev, "FIFO overrun\n");

		dmaengine_terminate_async(drv_data->rx_chan);
		dmaengine_terminate_async(drv_data->tx_chan);

		pxa2xx_spi_dma_transfer_complete(drv_data, true);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

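/*
 * Prepare and submit the TX and RX descriptors for the current transfer.
 * The transfer is considered done when the RX descriptor completes, so the
 * completion callback is attached to the RX side only.
 */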
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;

	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
	if (!tx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA TX descriptor\n");
		return -EBUSY;
	}

	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
	if (!rx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA RX descriptor\n");
		return -EBUSY;
	}

	/* We are ready when RX completes */
	rx_desc->callback = pxa2xx_spi_dma_callback;
	rx_desc->callback_param = drv_data;

	dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);
	return 0;
}

void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	dma_async_issue_pending(drv_data->rx_chan);
	dma_async_issue_pending(drv_data->tx_chan);

	atomic_set(&drv_data->dma_running, 1);
}

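/*
 * Allocate the dummy buffer used for unidirectional transfers and request
 * the TX and RX DMA channels, either through the platform-provided filter
 * function or via the standard DMA slave binding.
 */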
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct pxa2xx_spi_master *pdata = drv_data->master_info;
	struct device *dev = &drv_data->pdev->dev;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
	if (!drv_data->dummy)
		return -ENOMEM;

	drv_data->tx_chan = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!drv_data->tx_chan)
		return -ENODEV;

	drv_data->rx_chan = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!drv_data->rx_chan) {
		dma_release_channel(drv_data->tx_chan);
		drv_data->tx_chan = NULL;
		return -ENODEV;
	}

	return 0;
}

void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	if (drv_data->rx_chan) {
		dmaengine_terminate_sync(drv_data->rx_chan);
		dma_release_channel(drv_data->rx_chan);
		sg_free_table(&drv_data->rx_sgt);
		drv_data->rx_chan = NULL;
	}
	if (drv_data->tx_chan) {
		dmaengine_terminate_sync(drv_data->tx_chan);
		dma_release_channel(drv_data->tx_chan);
		sg_free_table(&drv_data->tx_sgt);
		drv_data->tx_chan = NULL;
	}
}

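/*
 * Work out the DMA burst size and FIFO threshold settings for a chip. The
 * burst size comes from chip_info when provided, otherwise a default of 1
 * is used together with the default FIFO thresholds.
 */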
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info = spi->controller_data;

	/*
	 * If the DMA burst size is given in chip_info we use that,
	 * otherwise we use the default. Also we use the default FIFO
	 * thresholds for now.
	 */
	*burst_code = chip_info ? chip_info->dma_burst_size : 1;
	*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
		   | SSCR1_TxTresh(TX_THRESH_DFLT);

	return 0;
}