/* linux/drivers/tty/serial/8250/8250_dma.c */
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * 8250_dma.c - DMA Engine API support for 8250.c
   4 *
   5 * Copyright (C) 2013 Intel Corporation
   6 */
   7#include <linux/tty.h>
   8#include <linux/tty_flip.h>
   9#include <linux/serial_reg.h>
  10#include <linux/dma-mapping.h>
  11
  12#include "8250.h"
  13
/*
 * Completion callback for a TX DMA transfer.
 *
 * Runs in dmaengine callback context after the controller has consumed
 * dma->tx_size bytes from the circular transmit buffer.  Accounts the
 * transmitted bytes, wakes up writers waiting for buffer space, and
 * immediately tries to queue the next chunk; if that fails, falls back
 * to IRQ-driven TX by enabling the THRI interrupt.
 */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct circ_buf		*xmit = &p->port.state->xmit;
	unsigned long	flags;
	int		ret;

	/* Hand ownership of the just-transmitted region back to the CPU. */
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	/* Advance the circular-buffer tail past the bytes DMA just sent. */
	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/*
	 * Try to start the next DMA chunk right away (handles the wrap of
	 * the circular buffer).  On failure fall back to the THR-empty
	 * interrupt so pending data is still pushed out via PIO.
	 */
	ret = serial8250_tx_dma(p);
	if (ret) {
		p->ier |= UART_IER_THRI;
		serial_port_out(&p->port, UART_IER, p->ier);
	}

	spin_unlock_irqrestore(&p->port.lock, flags);
}
  44
/*
 * Completion handler for an RX DMA transfer.
 *
 * Called either from dmaengine callback context when the transfer
 * finishes, or synchronously from serial8250_rx_dma_flush() after the
 * channel has been paused.  The number of bytes actually received is
 * derived from the engine-reported residue, then pushed to the tty
 * layer.
 *
 * NOTE(review): rx_running is cleared before the residue is read and no
 * lock is taken here — presumably callers serialize via the port lock /
 * channel pause; verify there is no race between the DMA callback and a
 * concurrent serial8250_rx_dma_flush().
 */
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	int			count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	/* residue = bytes the engine did NOT fill; the rest arrived. */
	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}
  63
/*
 * Start (or attempt to start) a transmit DMA transfer for the bytes
 * currently pending in the port's circular transmit buffer.
 *
 * Returns 0 when a transfer is already in flight, has been queued, or
 * there is nothing to send; a negative error code when the descriptor
 * could not be prepared, in which case tx_err is set so callers can
 * fall back to IRQ-driven TX.
 *
 * NOTE(review): appears to expect the port lock held — its caller
 * __dma_tx_complete() invokes it under spin_lock_irqsave(); confirm the
 * same holds for any other callers.
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	int ret;

	if (dma->tx_running)
		return 0;

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	/* Send only up to the buffer end; the wrap is picked up by the
	 * next call from __dma_tx_complete(). */
	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	/* Install the callback before submitting so completion can't be
	 * missed. */
	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Hand the buffer to the device before the engine starts reading. */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		/* A previous failure enabled THRI as fallback; DMA works
		 * again, so the interrupt path is no longer needed. */
		dma->tx_err = 0;
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
 113
 114int serial8250_rx_dma(struct uart_8250_port *p)
 115{
 116        struct uart_8250_dma            *dma = p->dma;
 117        struct dma_async_tx_descriptor  *desc;
 118
 119        if (dma->rx_running)
 120                return 0;
 121
 122        desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
 123                                           dma->rx_size, DMA_DEV_TO_MEM,
 124                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 125        if (!desc)
 126                return -EBUSY;
 127
 128        dma->rx_running = 1;
 129        desc->callback = __dma_rx_complete;
 130        desc->callback_param = p;
 131
 132        dma->rx_cookie = dmaengine_submit(desc);
 133
 134        dma_async_issue_pending(dma->rxchan);
 135
 136        return 0;
 137}
 138
 139void serial8250_rx_dma_flush(struct uart_8250_port *p)
 140{
 141        struct uart_8250_dma *dma = p->dma;
 142
 143        if (dma->rx_running) {
 144                dmaengine_pause(dma->rxchan);
 145                __dma_rx_complete(p);
 146                dmaengine_terminate_async(dma->rxchan);
 147        }
 148}
 149EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
 150
/*
 * Acquire and configure the RX and TX DMA channels and buffers for a
 * port.
 *
 * The slave addresses default to the port's MMIO base unless the caller
 * pre-set rx_dma_addr/tx_dma_addr.  RX requires a dmaengine driver with
 * pause/terminate support and better-than-descriptor residue reporting;
 * TX only requires terminate support.  The RX buffer is a coherent
 * allocation owned here; the TX side streams directly out of the uart
 * circular buffer via dma_map_single().
 *
 * Returns 0 on success or a negative error code; on failure all
 * partially acquired resources are released (goto-cleanup).
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		/* Descriptor-level residue is useless for computing how many
		 * bytes arrived in __dma_rx_complete(). */
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer: map the uart circular buffer itself for streaming DMA */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		/* RX buffer was already allocated above; undo it here since
		 * the err label only releases the channels. */
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
 246
 247void serial8250_release_dma(struct uart_8250_port *p)
 248{
 249        struct uart_8250_dma *dma = p->dma;
 250
 251        if (!dma)
 252                return;
 253
 254        /* Release RX resources */
 255        dmaengine_terminate_sync(dma->rxchan);
 256        dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
 257                          dma->rx_addr);
 258        dma_release_channel(dma->rxchan);
 259        dma->rxchan = NULL;
 260
 261        /* Release TX resources */
 262        dmaengine_terminate_sync(dma->txchan);
 263        dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
 264                         UART_XMIT_SIZE, DMA_TO_DEVICE);
 265        dma_release_channel(dma->txchan);
 266        dma->txchan = NULL;
 267        dma->tx_running = 0;
 268
 269        dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
 270}
 271EXPORT_SYMBOL_GPL(serial8250_release_dma);
 272