linux/drivers/tty/serial/amba-pl011.c
/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */


#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>

#define UART_NR                 14

#define SERIAL_AMBA_MAJOR       204
#define SERIAL_AMBA_MINOR       64
#define SERIAL_AMBA_NR          UART_NR

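/*
 * Upper bound on loop iterations in the interrupt handler, so that a
 * stuck or screaming interrupt source cannot keep pl011_int() spinning
 * forever with the port lock held.
 */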
#define AMBA_ISR_PASS_LIMIT     256

#define UART_DR_ERROR           (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
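/*
 * Bit 16 lies outside the DR data and status bits.  It is ORed into every
 * word read from the FIFO so that, when CREAD is off and set_termios (not
 * shown in this section) puts UART_DUMMY_DR_RX into ignore_status_mask,
 * uart_insert_char() can recognise and discard the character.
 */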
#define UART_DUMMY_DR_RX        (1 << 16)

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
        unsigned int            ifls;
        unsigned int            lcrh_tx;
        unsigned int            lcrh_rx;
        bool                    oversampling;
        bool                    dma_threshold;
        bool                    cts_event_workaround;

        unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
        return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
        .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
        .lcrh_tx                = UART011_LCRH,
        .lcrh_rx                = UART011_LCRH,
        .oversampling           = false,
        .dma_threshold          = false,
        .cts_event_workaround   = false,
        .get_fifosize           = get_fifosize_arm,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
        return 64;
}

static struct vendor_data vendor_st = {
        .ifls                   = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
        .lcrh_tx                = ST_UART011_LCRH_TX,
        .lcrh_rx                = ST_UART011_LCRH_RX,
        .oversampling           = true,
        .dma_threshold          = true,
        .cts_event_workaround   = true,
        .get_fifosize           = get_fifosize_st,
};

/* Deals with DMA transactions */

struct pl011_sgbuf {
        struct scatterlist sg;
        char *buf;
};

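/*
 * The RX side runs a classic double-buffering scheme: while one sgbuf is
 * being filled by the DMA engine, the other is drained into the TTY layer,
 * and use_buf_b tracks which buffer the hardware currently owns.
 */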
struct pl011_dmarx_data {
        struct dma_chan         *chan;
        struct completion       complete;
        bool                    use_buf_b;
        struct pl011_sgbuf      sgbuf_a;
        struct pl011_sgbuf      sgbuf_b;
        dma_cookie_t            cookie;
        bool                    running;
        struct timer_list       timer;
        unsigned int last_residue;
        unsigned long last_jiffies;
        bool auto_poll_rate;
        unsigned int poll_rate;
        unsigned int poll_timeout;
};

struct pl011_dmatx_data {
        struct dma_chan         *chan;
        struct scatterlist      sg;
        char                    *buf;
        bool                    queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
        struct uart_port        port;
        struct clk              *clk;
        const struct vendor_data *vendor;
        unsigned int            dmacr;          /* dma control reg */
        unsigned int            im;             /* interrupt mask */
        unsigned int            old_status;
        unsigned int            fifosize;       /* vendor-specific */
        unsigned int            lcrh_tx;        /* vendor-specific */
        unsigned int            lcrh_rx;        /* vendor-specific */
        unsigned int            old_cr;         /* state during shutdown */
        bool                    autorts;
        char                    type[12];
#ifdef CONFIG_DMA_ENGINE
        /* DMA stuff */
        bool                    using_tx_dma;
        bool                    using_rx_dma;
        struct pl011_dmarx_data dmarx;
        struct pl011_dmatx_data dmatx;
#endif
};

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
        u16 status, ch;
        unsigned int flag, max_count = 256;
        int fifotaken = 0;

        while (max_count--) {
                status = readw(uap->port.membase + UART01x_FR);
                if (status & UART01x_FR_RXFE)
                        break;

                /* Take chars from the FIFO and update status */
                ch = readw(uap->port.membase + UART01x_DR) |
                        UART_DUMMY_DR_RX;
                flag = TTY_NORMAL;
                uap->port.icount.rx++;
                fifotaken++;

                if (unlikely(ch & UART_DR_ERROR)) {
                        if (ch & UART011_DR_BE) {
                                ch &= ~(UART011_DR_FE | UART011_DR_PE);
                                uap->port.icount.brk++;
                                if (uart_handle_break(&uap->port))
                                        continue;
                        } else if (ch & UART011_DR_PE)
                                uap->port.icount.parity++;
                        else if (ch & UART011_DR_FE)
                                uap->port.icount.frame++;
                        if (ch & UART011_DR_OE)
                                uap->port.icount.overrun++;

                        ch &= uap->port.read_status_mask;

                        if (ch & UART011_DR_BE)
                                flag = TTY_BREAK;
                        else if (ch & UART011_DR_PE)
                                flag = TTY_PARITY;
                        else if (ch & UART011_DR_FE)
                                flag = TTY_FRAME;
                }

                if (uart_handle_sysrq_char(&uap->port, ch & 255))
                        continue;

                uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
        }

        return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

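/*
 * Set up a single-entry scatterlist over one coherent DMA buffer.  Note
 * that phys_to_page() is applied to the bus address returned by
 * dma_alloc_coherent(); this assumes bus and physical addresses map 1:1,
 * which holds on the platforms this driver is used with.
 */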
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
        enum dma_data_direction dir)
{
        dma_addr_t dma_addr;

        sg->buf = dma_alloc_coherent(chan->device->dev,
                PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
        if (!sg->buf)
                return -ENOMEM;

        sg_init_table(&sg->sg, 1);
        sg_set_page(&sg->sg, phys_to_page(dma_addr),
                PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
        sg_dma_address(&sg->sg) = dma_addr;
        sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

        return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
        enum dma_data_direction dir)
{
        if (sg->buf) {
                dma_free_coherent(chan->device->dev,
                        PL011_DMA_BUFFER_SIZE, sg->buf,
                        sg_dma_address(&sg->sg));
        }
}

static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
{
        /* DMA is the sole user of the platform data right now */
        struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
        struct dma_slave_config tx_conf = {
                .dst_addr = uap->port.mapbase + UART01x_DR,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .direction = DMA_MEM_TO_DEV,
                .dst_maxburst = uap->fifosize >> 1,
                .device_fc = false,
        };
        struct dma_chan *chan;
        dma_cap_mask_t mask;

        chan = dma_request_slave_channel(dev, "tx");

        if (!chan) {
                /* We need platform data */
                if (!plat || !plat->dma_filter) {
                        dev_info(uap->port.dev, "no DMA platform data\n");
                        return;
                }

                /* Try to acquire a generic DMA engine slave TX channel */
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                chan = dma_request_channel(mask, plat->dma_filter,
                                                plat->dma_tx_param);
                if (!chan) {
                        dev_err(uap->port.dev, "no TX DMA channel!\n");
                        return;
                }
        }

        dmaengine_slave_config(chan, &tx_conf);
        uap->dmatx.chan = chan;

        dev_info(uap->port.dev, "DMA channel TX %s\n",
                 dma_chan_name(uap->dmatx.chan));

        /* Optionally make use of an RX channel as well */
        chan = dma_request_slave_channel(dev, "rx");

        if (!chan && plat && plat->dma_rx_param) {
                /*
                 * The capability mask is only initialised on the TX fallback
                 * path above, so make sure it is valid before using it here.
                 */
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

                if (!chan) {
                        dev_err(uap->port.dev, "no RX DMA channel!\n");
                        return;
                }
        }

        if (chan) {
                struct dma_slave_config rx_conf = {
                        .src_addr = uap->port.mapbase + UART01x_DR,
                        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                        .direction = DMA_DEV_TO_MEM,
                        .src_maxburst = uap->fifosize >> 2,
                        .device_fc = false,
                };
                struct dma_slave_caps caps;

                /*
                 * Some DMA controllers provide information on their capabilities.
                 * If the controller does, check for suitable residue processing;
                 * otherwise assume all is well.
                 */
                if (0 == dma_get_slave_caps(chan, &caps)) {
                        if (caps.residue_granularity ==
                                        DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
                                dma_release_channel(chan);
                                dev_info(uap->port.dev,
                                        "RX DMA disabled - no residue processing\n");
                                return;
                        }
                }
                dmaengine_slave_config(chan, &rx_conf);
                uap->dmarx.chan = chan;

                uap->dmarx.auto_poll_rate = false;
                if (plat && plat->dma_rx_poll_enable) {
                        /* Set poll rate if specified. */
                        if (plat->dma_rx_poll_rate) {
                                uap->dmarx.auto_poll_rate = false;
                                uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
                        } else {
                                /*
                                 * Default to a 100 ms poll rate if none is
                                 * specified.  This will be adjusted to match
                                 * the baud rate in set_termios.
                                 */
                                uap->dmarx.auto_poll_rate = true;
                                uap->dmarx.poll_rate =  100;
                        }
                        /* poll_timeout defaults to 3 seconds if not specified. */
                        if (plat->dma_rx_poll_timeout)
                                uap->dmarx.poll_timeout =
                                        plat->dma_rx_poll_timeout;
                        else
                                uap->dmarx.poll_timeout = 3000;
                } else if (!plat && dev->of_node) {
                        uap->dmarx.auto_poll_rate = of_property_read_bool(
                                                dev->of_node, "auto-poll");
                        if (uap->dmarx.auto_poll_rate) {
                                u32 x;

                                if (0 == of_property_read_u32(dev->of_node,
                                                "poll-rate-ms", &x))
                                        uap->dmarx.poll_rate = x;
                                else
                                        uap->dmarx.poll_rate = 100;
                                if (0 == of_property_read_u32(dev->of_node,
                                                "poll-timeout-ms", &x))
                                        uap->dmarx.poll_timeout = x;
                                else
                                        uap->dmarx.poll_timeout = 3000;
                        }
                }
                dev_info(uap->port.dev, "DMA channel RX %s\n",
                         dma_chan_name(uap->dmarx.chan));
        }
}

#ifndef MODULE
/*
 * Stack up the UARTs and let the above initcall be done at device
 * initcall time, because the serial driver is called as an arch
 * initcall, and at this time the DMA subsystem is not yet registered.
 * At this point the driver will switch over to using DMA where desired.
 */
struct dma_uap {
        struct list_head node;
        struct uart_amba_port *uap;
        struct device *dev;
};

static LIST_HEAD(pl011_dma_uarts);

static int __init pl011_dma_initcall(void)
{
        struct list_head *node, *tmp;

        list_for_each_safe(node, tmp, &pl011_dma_uarts) {
                struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
                pl011_dma_probe_initcall(dmau->dev, dmau->uap);
                list_del(node);
                kfree(dmau);
        }
        return 0;
}

device_initcall(pl011_dma_initcall);

static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
        struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
        if (dmau) {
                dmau->uap = uap;
                dmau->dev = dev;
                list_add_tail(&dmau->node, &pl011_dma_uarts);
        }
}
#else
static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
        pl011_dma_probe_initcall(dev, uap);
}
#endif

static void pl011_dma_remove(struct uart_amba_port *uap)
{
        /* TODO: remove the initcall if it has not yet executed */
        if (uap->dmatx.chan)
                dma_release_channel(uap->dmatx.chan);
        if (uap->dmarx.chan)
                dma_release_channel(uap->dmarx.chan);
}

/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
        struct uart_amba_port *uap = data;
        struct pl011_dmatx_data *dmatx = &uap->dmatx;
        unsigned long flags;
        u16 dmacr;

        spin_lock_irqsave(&uap->port.lock, flags);
        if (uap->dmatx.queued)
                dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
                             DMA_TO_DEVICE);

        dmacr = uap->dmacr;
        uap->dmacr = dmacr & ~UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        /*
         * If TX DMA was disabled, it means that we've stopped the DMA for
         * some reason (eg, XOFF received, or we want to send an X-char.)
         *
         * Note: we need to be careful here of a potential race between DMA
         * and the rest of the driver - if the driver disables TX DMA while
         * a TX buffer is completing, we must update the tx queued status to
         * get further refills (hence we check dmacr).
         */
        if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
            uart_circ_empty(&uap->port.state->xmit)) {
                uap->dmatx.queued = false;
                spin_unlock_irqrestore(&uap->port.lock, flags);
                return;
        }

        if (pl011_dma_tx_refill(uap) <= 0) {
                /*
                 * We didn't queue a DMA buffer for some reason, but we
                 * have data pending to be sent.  Re-enable the TX IRQ.
                 */
                uap->im |= UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
        spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
        struct pl011_dmatx_data *dmatx = &uap->dmatx;
        struct dma_chan *chan = dmatx->chan;
        struct dma_device *dma_dev = chan->device;
        struct dma_async_tx_descriptor *desc;
        struct circ_buf *xmit = &uap->port.state->xmit;
        unsigned int count;

        /*
         * Try to avoid the overhead involved in using DMA if the
         * transaction fits in the first half of the FIFO, by using
         * the standard interrupt handling.  This ensures that we
         * issue a uart_write_wakeup() at the appropriate time.
         */
        count = uart_circ_chars_pending(xmit);
        if (count < (uap->fifosize >> 1)) {
                uap->dmatx.queued = false;
                return 0;
        }

        /*
         * Bodge: don't send the last character by DMA, as this
         * will prevent XON from notifying us to restart DMA.
         */
        count -= 1;

        /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
        if (count > PL011_DMA_BUFFER_SIZE)
                count = PL011_DMA_BUFFER_SIZE;

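        /*
         * The circular xmit buffer may wrap in the middle of the region we
         * are about to send: copy the tail..end chunk first, then whatever
         * remains from the start of the buffer.
         */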
        if (xmit->tail < xmit->head)
                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
        else {
                size_t first = UART_XMIT_SIZE - xmit->tail;
                size_t second;

                if (first > count)
                        first = count;
                second = count - first;

                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
                if (second)
                        memcpy(&dmatx->buf[first], &xmit->buf[0], second);
        }

        dmatx->sg.length = count;

        if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
                uap->dmatx.queued = false;
                dev_dbg(uap->port.dev, "unable to map TX DMA\n");
                return -EBUSY;
        }

        desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                /*
                 * If DMA cannot be used right now, we complete this
                 * transaction via IRQ and let the TTY layer retry.
                 */
                dev_dbg(uap->port.dev, "TX DMA busy\n");
                return -EBUSY;
        }

        /* Some data to go along to the callback */
        desc->callback = pl011_dma_tx_callback;
        desc->callback_param = uap;

        /* All errors should happen at prepare time */
        dmaengine_submit(desc);

        /* Fire the DMA transaction */
        dma_dev->device_issue_pending(chan);

        uap->dmacr |= UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmatx.queued = true;

        /*
         * Now we know that DMA will fire, so advance the ring buffer
         * with the stuff we just dispatched.
         */
        xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
        uap->port.icount.tx += count;

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&uap->port);

        return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
        if (!uap->using_tx_dma)
                return false;

        /*
         * If we already have a TX buffer queued, but received a
         * TX interrupt, it will be because we've just sent an X-char.
         * Ensure the TX DMA is enabled and the TX IRQ is disabled.
         */
        if (uap->dmatx.queued) {
                uap->dmacr |= UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
                uap->im &= ~UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }

        /*
         * We don't have a TX buffer queued, so try to queue one.
         * If we successfully queued a buffer, mask the TX IRQ.
         */
        if (pl011_dma_tx_refill(uap) > 0) {
                uap->im &= ~UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }
        return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
        if (uap->dmatx.queued) {
                uap->dmacr &= ~UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        }
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
        u16 dmacr;

        if (!uap->using_tx_dma)
                return false;

        if (!uap->port.x_char) {
                /* no X-char, try to push chars out in DMA mode */
                bool ret = true;

                if (!uap->dmatx.queued) {
                        if (pl011_dma_tx_refill(uap) > 0) {
                                uap->im &= ~UART011_TXIM;
                                ret = true;
                        } else {
                                uap->im |= UART011_TXIM;
                                ret = false;
                        }
                        writew(uap->im, uap->port.membase + UART011_IMSC);
                } else if (!(uap->dmacr & UART011_TXDMAE)) {
                        uap->dmacr |= UART011_TXDMAE;
                        writew(uap->dmacr,
                                       uap->port.membase + UART011_DMACR);
                }
                return ret;
        }

        /*
         * We have an X-char to send.  Disable DMA to prevent it loading
         * the TX fifo, and then see if we can stuff it into the FIFO.
         */
        dmacr = uap->dmacr;
        uap->dmacr &= ~UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
                /*
                 * No space in the FIFO, so enable the transmit interrupt
                 * so we know when there is space.  Note that once we've
                 * loaded the character, we should just re-enable DMA.
                 */
                return false;
        }

        writew(uap->port.x_char, uap->port.membase + UART01x_DR);
        uap->port.icount.tx++;
        uap->port.x_char = 0;

        /* Success - restore the DMA state */
        uap->dmacr = dmacr;
        writew(dmacr, uap->port.membase + UART011_DMACR);

        return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        if (!uap->using_tx_dma)
                return;

        /* Avoid deadlock with the DMA engine callback */
        spin_unlock(&uap->port.lock);
        dmaengine_terminate_all(uap->dmatx.chan);
        spin_lock(&uap->port.lock);
        if (uap->dmatx.queued) {
                dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
                             DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                uap->dmacr &= ~UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        }
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
        struct dma_chan *rxchan = uap->dmarx.chan;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_async_tx_descriptor *desc;
        struct pl011_sgbuf *sgbuf;

        if (!rxchan)
                return -EIO;

        /* Start the RX DMA job */
        sgbuf = uap->dmarx.use_buf_b ?
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
         * If the DMA engine is busy and cannot prepare a
         * channel, no big deal, the driver will fall back
         * to interrupt mode as a result of this error code.
         */
        if (!desc) {
                uap->dmarx.running = false;
                dmaengine_terminate_all(rxchan);
                return -EBUSY;
        }

        /* Some data to go along to the callback */
        desc->callback = pl011_dma_rx_callback;
        desc->callback_param = uap;
        dmarx->cookie = dmaengine_submit(desc);
        dma_async_issue_pending(rxchan);

        uap->dmacr |= UART011_RXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmarx.running = true;

        uap->im &= ~UART011_RXIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);

        return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
                               u32 pending, bool use_buf_b,
                               bool readfifo)
{
        struct tty_port *port = &uap->port.state->port;
        struct pl011_sgbuf *sgbuf = use_buf_b ?
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        int dma_count = 0;
        u32 fifotaken = 0; /* only used for vdbg() */

        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        int dmataken = 0;

        if (uap->dmarx.poll_rate) {
                /* The data can be taken by polling */
                dmataken = sgbuf->sg.length - dmarx->last_residue;
                /* Recalculate the pending size */
                if (pending >= dmataken)
                        pending -= dmataken;
        }

        /* Pick up the remaining data from the DMA */
        if (pending) {

                /*
                 * First take all chars in the DMA pipe, then look in the FIFO.
                 * Note that tty_insert_flip_string() tries to take as many chars
                 * as it can.
                 */
                dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
                                pending);

                uap->port.icount.rx += dma_count;
                if (dma_count < pending)
                        dev_warn(uap->port.dev,
                                 "couldn't insert all characters (TTY is full?)\n");
        }

        /* Reset the last_residue for Rx DMA poll */
        if (uap->dmarx.poll_rate)
                dmarx->last_residue = sgbuf->sg.length;

        /*
         * Only continue with trying to read the FIFO if all DMA chars have
         * been taken first.
         */
        if (dma_count == pending && readfifo) {
                /* Clear any error flags */
                writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
                       uap->port.membase + UART011_ICR);

                /*
                 * If we read all the DMA'd characters, and we had an
                 * incomplete buffer, that could be due to an rx error, or
                 * maybe we just timed out. Read any pending chars and check
                 * the error status.
                 *
                 * Error conditions will only occur in the FIFO, these will
                 * trigger an immediate interrupt and stop the DMA job, so we
                 * will always find the error in the FIFO, never in the DMA
                 * buffer.
                 */
                fifotaken = pl011_fifo_to_tty(uap);
        }

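        /*
         * Drop the port lock across the TTY push: tty_flip_buffer_push()
         * can call back into the driver (e.g. for flow control), which
         * would deadlock on uap->port.lock if we kept holding it.
         */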
        spin_unlock(&uap->port.lock);
        dev_vdbg(uap->port.dev,
                 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
                 dma_count, fifotaken);
        tty_flip_buffer_push(port);
        spin_lock(&uap->port.lock);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_chan *rxchan = dmarx->chan;
        struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
                &dmarx->sgbuf_b : &dmarx->sgbuf_a;
        size_t pending;
        struct dma_tx_state state;
        enum dma_status dmastat;

        /*
         * Pause the transfer so we can trust the current counter,
         * do this before we pause the PL011 block, else we may
         * overflow the FIFO.
         */
        if (dmaengine_pause(rxchan))
                dev_err(uap->port.dev, "unable to pause DMA transfer\n");
        dmastat = rxchan->device->device_tx_status(rxchan,
                                                   dmarx->cookie, &state);
        if (dmastat != DMA_PAUSED)
                dev_err(uap->port.dev, "unable to pause DMA transfer\n");

        /* Disable RX DMA - incoming data will wait in the FIFO */
        uap->dmacr &= ~UART011_RXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmarx.running = false;

        pending = sgbuf->sg.length - state.residue;
        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
        /* Then we terminate the transfer - we now know our residue */
        dmaengine_terminate_all(rxchan);

        /*
         * This will take the chars we have so far and insert
         * into the framework.
         */
        pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

        /* Switch buffer & re-trigger DMA job */
        dmarx->use_buf_b = !dmarx->use_buf_b;
        if (pl011_dma_rx_trigger_dma(uap)) {
                dev_dbg(uap->port.dev, "could not retrigger RX DMA job, "
                        "fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
}

static void pl011_dma_rx_callback(void *data)
{
        struct uart_amba_port *uap = data;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_chan *rxchan = dmarx->chan;
        bool lastbuf = dmarx->use_buf_b;
        struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
                &dmarx->sgbuf_b : &dmarx->sgbuf_a;
        size_t pending;
        struct dma_tx_state state;
        int ret;

        /*
         * This completion interrupt occurs typically when the
         * RX buffer is totally stuffed but no timeout has yet
         * occurred. When that happens, we just want the RX
         * routine to flush out the secondary DMA buffer while
         * we immediately trigger the next DMA job.
         */
        spin_lock_irq(&uap->port.lock);
        /*
         * Rx data can be taken by the UART interrupts during
         * the DMA irq handler. So we check the residue here.
         */
        rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
        pending = sgbuf->sg.length - state.residue;
        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
        /* Then we terminate the transfer - we now know our residue */
        dmaengine_terminate_all(rxchan);

        uap->dmarx.running = false;
        dmarx->use_buf_b = !lastbuf;
        ret = pl011_dma_rx_trigger_dma(uap);

        pl011_dma_rx_chars(uap, pending, lastbuf, false);
        spin_unlock_irq(&uap->port.lock);
        /*
         * Do this check after we picked the DMA chars so we don't
         * get some IRQ immediately from RX.
         */
        if (ret) {
                dev_dbg(uap->port.dev, "could not retrigger RX DMA job, "
                        "fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
        /* FIXME.  Just disable the DMA enable */
        uap->dmacr &= ~UART011_RXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On each poll it checks the residue in the DMA buffer and transfers
 * data to the TTY. last_residue is also updated for the next poll.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)args;
        struct tty_port *port = &uap->port.state->port;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_chan *rxchan = uap->dmarx.chan;
        unsigned long flags = 0;
        unsigned int dmataken = 0;
        unsigned int size = 0;
        struct pl011_sgbuf *sgbuf;
        int dma_count;
        struct dma_tx_state state;

        sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
        if (likely(state.residue < dmarx->last_residue)) {
                dmataken = sgbuf->sg.length - dmarx->last_residue;
                size = dmarx->last_residue - state.residue;
                dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
                                size);
                if (dma_count == size)
                        dmarx->last_residue =  state.residue;
                dmarx->last_jiffies = jiffies;
        }
        tty_flip_buffer_push(port);

        /*
         * If no data is received in poll_timeout, the driver will fall back
         * to interrupt mode. We will retrigger DMA at the first interrupt.
         */
        if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
                        > uap->dmarx.poll_timeout) {

                spin_lock_irqsave(&uap->port.lock, flags);
                pl011_dma_rx_stop(uap);
                uap->im |= UART011_RXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
                spin_unlock_irqrestore(&uap->port.lock, flags);

                uap->dmarx.running = false;
                dmaengine_terminate_all(rxchan);
                del_timer(&uap->dmarx.timer);
        } else {
                mod_timer(&uap->dmarx.timer,
                        jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
        }
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
        int ret;

        if (!uap->dmatx.chan)
                return;

        uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
        if (!uap->dmatx.buf) {
                dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
                uap->port.fifosize = uap->fifosize;
                return;
        }

        sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

        /* The DMA buffer is now the FIFO the TTY subsystem can use */
        uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
        uap->using_tx_dma = true;

        if (!uap->dmarx.chan)
                goto skip_rx;

        /* Allocate and map DMA RX buffers */
        ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
                        "RX buffer A", ret);
                goto skip_rx;
        }

        ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
                        "RX buffer B", ret);
                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
                                 DMA_FROM_DEVICE);
                goto skip_rx;
        }

        uap->using_rx_dma = true;

skip_rx:
        /* Turn on DMA error (RX/TX will be enabled on demand) */
        uap->dmacr |= UART011_DMAONERR;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        /*
         * ST Micro variants have a specific DMA burst threshold
         * compensation. Set this to 16 bytes, so a burst will only
         * be issued above/below 16 bytes.
         */
        if (uap->vendor->dma_threshold)
                writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
                               uap->port.membase + ST_UART011_DMAWM);

        if (uap->using_rx_dma) {
                if (pl011_dma_rx_trigger_dma(uap))
                        dev_dbg(uap->port.dev, "could not trigger initial "
                                "RX DMA job, fall back to interrupt mode\n");
                if (uap->dmarx.poll_rate) {
                        init_timer(&(uap->dmarx.timer));
                        uap->dmarx.timer.function = pl011_dma_rx_poll;
                        uap->dmarx.timer.data = (unsigned long)uap;
                        mod_timer(&uap->dmarx.timer,
                                jiffies +
                                msecs_to_jiffies(uap->dmarx.poll_rate));
                        uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
                        uap->dmarx.last_jiffies = jiffies;
                }
        }
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
        if (!(uap->using_tx_dma || uap->using_rx_dma))
                return;

        /* Wait for the UART to drain, then disable RX and TX DMA */
        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
                barrier();

        spin_lock_irq(&uap->port.lock);
        uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        spin_unlock_irq(&uap->port.lock);

        if (uap->using_tx_dma) {
                /* In theory, this should already be done by pl011_dma_flush_buffer */
                dmaengine_terminate_all(uap->dmatx.chan);
                if (uap->dmatx.queued) {
                        dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
                                     DMA_TO_DEVICE);
                        uap->dmatx.queued = false;
                }

                kfree(uap->dmatx.buf);
                uap->using_tx_dma = false;
        }

        if (uap->using_rx_dma) {
                dmaengine_terminate_all(uap->dmarx.chan);
                /* Clean up the RX DMA */
                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
                if (uap->dmarx.poll_rate)
                        del_timer_sync(&uap->dmarx.timer);
                uap->using_rx_dma = false;
        }
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
        return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
        return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
        return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
        return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
        return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
        return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
        return false;
}

#define pl011_dma_flush_buffer  NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        uap->im &= ~UART011_TXIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);
        pl011_dma_tx_stop(uap);
}

static void pl011_start_tx(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        if (!pl011_dma_tx_start(uap)) {
                uap->im |= UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
}

static void pl011_stop_rx(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
                     UART011_PEIM|UART011_BEIM|UART011_OEIM);
        writew(uap->im, uap->port.membase + UART011_IMSC);

        pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
        pl011_fifo_to_tty(uap);

        spin_unlock(&uap->port.lock);
        tty_flip_buffer_push(&uap->port.state->port);
        /*
         * If we were temporarily out of DMA mode for a while,
         * attempt to switch back to DMA mode again.
         */
        if (pl011_dma_rx_available(uap)) {
                if (pl011_dma_rx_trigger_dma(uap)) {
                        dev_dbg(uap->port.dev, "could not trigger RX DMA job, "
                                "fall back to interrupt mode again\n");
                        uap->im |= UART011_RXIM;
                        writew(uap->im, uap->port.membase + UART011_IMSC);
                } else {
#ifdef CONFIG_DMA_ENGINE
                        /* Start Rx DMA poll */
                        if (uap->dmarx.poll_rate) {
                                uap->dmarx.last_jiffies = jiffies;
                                uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
                                mod_timer(&uap->dmarx.timer,
                                        jiffies +
                                        msecs_to_jiffies(uap->dmarx.poll_rate));
                        }
#endif
                }
        }
        spin_lock(&uap->port.lock);
}

static void pl011_tx_chars(struct uart_amba_port *uap)
{
        struct circ_buf *xmit = &uap->port.state->xmit;
        int count;

        if (uap->port.x_char) {
                writew(uap->port.x_char, uap->port.membase + UART01x_DR);
                uap->port.icount.tx++;
                uap->port.x_char = 0;
                return;
        }
        if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
                pl011_stop_tx(&uap->port);
                return;
        }

        /* If we are using DMA mode, try to send some characters. */
        if (pl011_dma_tx_irq(uap))
                return;

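        /*
         * PIO path: with the half-FIFO interrupt thresholds both vendors
         * configure here, at least half the FIFO is free when TXIS fires,
         * so up to fifosize/2 characters can be written without polling
         * the flag register.
         */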
        count = uap->fifosize >> 1;
        do {
                writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                uap->port.icount.tx++;
                if (uart_circ_empty(xmit))
                        break;
        } while (--count > 0);

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&uap->port);

        if (uart_circ_empty(xmit))
                pl011_stop_tx(&uap->port);
}

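/*
 * Work out which modem-status lines changed since the last interrupt,
 * feed the deltas into the serial core's accounting, and wake anyone
 * sleeping in TIOCMIWAIT.
 */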
static void pl011_modem_status(struct uart_amba_port *uap)
{
        unsigned int status, delta;

        status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

        delta = status ^ uap->old_status;
        uap->old_status = status;

        if (!delta)
                return;

        if (delta & UART01x_FR_DCD)
                uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

        if (delta & UART01x_FR_DSR)
                uap->port.icount.dsr++;

        if (delta & UART01x_FR_CTS)
                uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

        wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
        struct uart_amba_port *uap = dev_id;
        unsigned long flags;
        unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
        int handled = 0;
        unsigned int dummy_read;

        spin_lock_irqsave(&uap->port.lock, flags);
        status = readw(uap->port.membase + UART011_MIS);
        if (status) {
                do {
                        if (uap->vendor->cts_event_workaround) {
                                /* workaround to make sure that all bits are unlocked.. */
                                writew(0x00, uap->port.membase + UART011_ICR);

                                /*
                                 * WA: introduce 26ns(1 uart clk) delay before W1C;
                                 * single apb access will incur 2 pclk(133.12Mhz) delay,
                                 * so add 2 dummy reads
                                 */
                                dummy_read = readw(uap->port.membase + UART011_ICR);
                                dummy_read = readw(uap->port.membase + UART011_ICR);
                        }

                        writew(status & ~(UART011_TXIS|UART011_RTIS|
                                          UART011_RXIS),
                               uap->port.membase + UART011_ICR);

                        if (status & (UART011_RTIS|UART011_RXIS)) {
                                if (pl011_dma_rx_running(uap))
                                        pl011_dma_rx_irq(uap);
                                else
                                        pl011_rx_chars(uap);
                        }
                        if (status & (UART011_DSRMIS|UART011_DCDMIS|
                                      UART011_CTSMIS|UART011_RIMIS))
                                pl011_modem_status(uap);
                        if (status & UART011_TXIS)
                                pl011_tx_chars(uap);

                        if (pass_counter-- == 0)
                                break;

                        status = readw(uap->port.membase + UART011_MIS);
                } while (status != 0);
                handled = 1;
        }

        spin_unlock_irqrestore(&uap->port.lock, flags);

        return IRQ_RETVAL(handled);
}

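/*
 * BUSY is set while the TX FIFO is non-empty or the shift register is
 * still clocking out bits, so only report TEMT when both BUSY and TXFF
 * are clear.
 */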
1391static unsigned int pl011_tx_empty(struct uart_port *port)
1392{
1393        struct uart_amba_port *uap =
1394            container_of(port, struct uart_amba_port, port);
1395        unsigned int status = readw(uap->port.membase + UART01x_FR);
1396        return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1397}
1398
1399static unsigned int pl011_get_mctrl(struct uart_port *port)
1400{
1401        struct uart_amba_port *uap =
1402            container_of(port, struct uart_amba_port, port);
1403        unsigned int result = 0;
1404        unsigned int status = readw(uap->port.membase + UART01x_FR);
1405
1406#define TIOCMBIT(uartbit, tiocmbit)     \
1407        if (status & uartbit)           \
1408                result |= tiocmbit
1409
1410        TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1411        TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1412        TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1413        TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1414#undef TIOCMBIT
1415        return result;
1416}
1417
1418static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1419{
1420        struct uart_amba_port *uap =
1421            container_of(port, struct uart_amba_port, port);
1422        unsigned int cr;
1423
1424        cr = readw(uap->port.membase + UART011_CR);
1425
1426#define TIOCMBIT(tiocmbit, uartbit)             \
1427        if (mctrl & tiocmbit)           \
1428                cr |= uartbit;          \
1429        else                            \
1430                cr &= ~uartbit
1431
1432        TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1433        TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1434        TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1435        TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1436        TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1437
1438        if (uap->autorts) {
1439                /* We need to disable auto-RTS if we want to turn RTS off */
1440                TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1441        }
1442#undef TIOCMBIT
1443
1444        writew(cr, uap->port.membase + UART011_CR);
1445}
1446
1447static void pl011_break_ctl(struct uart_port *port, int break_state)
1448{
1449        struct uart_amba_port *uap =
1450            container_of(port, struct uart_amba_port, port);
1451        unsigned long flags;
1452        unsigned int lcr_h;
1453
1454        spin_lock_irqsave(&uap->port.lock, flags);
1455        lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1456        if (break_state == -1)
1457                lcr_h |= UART01x_LCRH_BRK;
1458        else
1459                lcr_h &= ~UART01x_LCRH_BRK;
1460        writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1461        spin_unlock_irqrestore(&uap->port.lock, flags);
1462}
1463
1464#ifdef CONFIG_CONSOLE_POLL
1465
1466static void pl011_quiesce_irqs(struct uart_port *port)
1467{
1468        struct uart_amba_port *uap =
1469            container_of(port, struct uart_amba_port, port);
1470        unsigned char __iomem *regs = uap->port.membase;
1471
1472        writew(readw(regs + UART011_MIS), regs + UART011_ICR);
1473        /*
1474         * There is no way to clear TXIM, as it is the "ready to transmit"
1475         * IRQ, so we simply mask it.  start_tx() will unmask it.
1476         *
1477         * Note that we can race with start_tx(); if the race happens, the
1478         * polling user might get another interrupt just after we clear it.
1479         * That is fine, and can happen even without the race, e.g. when the
1480         * controller immediately receives new data and raises the IRQ.
1481         *
1482         * Also, whoever uses the polling routines is assumed to manage the
1483         * device (including the TX queue), so start_tx()'s caller side is
1484         * fine as well.
1485         */
1486        writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
1487}
1488
1489static int pl011_get_poll_char(struct uart_port *port)
1490{
1491        struct uart_amba_port *uap =
1492            container_of(port, struct uart_amba_port, port);
1493        unsigned int status;
1494
1495        /*
1496         * The caller might need the UART's IRQs quiesced, e.g. when used
1497         * with the KDB NMI debugger.
1498         */
1499        pl011_quiesce_irqs(port);
1500
1501        status = readw(uap->port.membase + UART01x_FR);
1502        if (status & UART01x_FR_RXFE)
1503                return NO_POLL_CHAR;
1504
1505        return readw(uap->port.membase + UART01x_DR);
1506}
1507
1508static void pl011_put_poll_char(struct uart_port *port,
1509                         unsigned char ch)
1510{
1511        struct uart_amba_port *uap =
1512            container_of(port, struct uart_amba_port, port);
1513
1514        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1515                barrier();
1516
1517        writew(ch, uap->port.membase + UART01x_DR);
1518}
1519
1520#endif /* CONFIG_CONSOLE_POLL */
1521
1522static int pl011_hwinit(struct uart_port *port)
1523{
1524        struct uart_amba_port *uap =
1525            container_of(port, struct uart_amba_port, port);
1526        int retval;
1527
1528        /* Optionally enable pins to be muxed in and configured */
1529        pinctrl_pm_select_default_state(port->dev);
1530
1531        /*
1532         * Try to enable the clock producer.
1533         */
1534        retval = clk_prepare_enable(uap->clk);
1535        if (retval)
1536                return retval;
1537
1538        uap->port.uartclk = clk_get_rate(uap->clk);
1539
1540        /* Clear pending error and receive interrupts */
1541        writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
1542               UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
1543
1544        /*
1545         * Save the interrupt enable mask, and enable RX interrupts in case
1546         * the interrupt is used for NMI entry.
1547         */
1548        uap->im = readw(uap->port.membase + UART011_IMSC);
1549        writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
1550
1551        if (dev_get_platdata(uap->port.dev)) {
1552                struct amba_pl011_data *plat;
1553
1554                plat = dev_get_platdata(uap->port.dev);
1555                if (plat->init)
1556                        plat->init();
1557        }
1558        return 0;
1559}
1560
1561static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1562{
1563        writew(lcr_h, uap->port.membase + uap->lcrh_rx);
1564        if (uap->lcrh_rx != uap->lcrh_tx) {
1565                int i;
1566                /*
1567                 * Wait 10 PCLKs before writing the LCRH_TX register;
1568                 * to get this delay, write a read-only register 10 times.
1569                 */
1570                for (i = 0; i < 10; ++i)
1571                        writew(0xff, uap->port.membase + UART011_MIS);
1572                writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1573        }
1574}
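/*
 * The dummy writes in pl011_write_lcr_h() rely on each write to the
 * read-only MIS register being ignored by the UART while still taking
 * at least one APB (PCLK) access cycle, which makes ten of them a cheap
 * way to guarantee the required delay between the LCRH_RX and LCRH_TX
 * writes on the variants that split the register.
 */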
1575
1576static int pl011_startup(struct uart_port *port)
1577{
1578        struct uart_amba_port *uap =
1579            container_of(port, struct uart_amba_port, port);
1580        unsigned int cr, lcr_h, fbrd, ibrd;
1581        int retval;
1582
1583        retval = pl011_hwinit(port);
1584        if (retval)
1585                return retval; /* the clock is still disabled on failure */
1586
1587        writew(uap->im, uap->port.membase + UART011_IMSC);
1588
1589        /*
1590         * Allocate the IRQ
1591         */
1592        retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
1593        if (retval)
1594                goto clk_dis;
1595
1596        writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
1597
1598        /*
1599         * Provoke the TX FIFO interrupt into asserting, taking care to
1600         * preserve the baud rate and data format specified by FBRD, IBRD
1601         * and LCRH, as the UART may already be in use as a console.
1602         */
1603        spin_lock_irq(&uap->port.lock);
1604
1605        fbrd = readw(uap->port.membase + UART011_FBRD);
1606        ibrd = readw(uap->port.membase + UART011_IBRD);
1607        lcr_h = readw(uap->port.membase + uap->lcrh_rx);
1608
1609        cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
1610        writew(cr, uap->port.membase + UART011_CR);
1611        writew(0, uap->port.membase + UART011_FBRD);
1612        writew(1, uap->port.membase + UART011_IBRD);
1613        pl011_write_lcr_h(uap, 0);
1614        writew(0, uap->port.membase + UART01x_DR);
1615        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
1616                barrier();
1617
1618        writew(fbrd, uap->port.membase + UART011_FBRD);
1619        writew(ibrd, uap->port.membase + UART011_IBRD);
1620        pl011_write_lcr_h(uap, lcr_h);
1621
1622        /* restore RTS and DTR */
1623        cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1624        cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1625        writew(cr, uap->port.membase + UART011_CR);
1626
1627        spin_unlock_irq(&uap->port.lock);
1628
1629        /*
1630         * initialise the old status of the modem signals
1631         */
1632        uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1633
1634        /* Startup DMA */
1635        pl011_dma_startup(uap);
1636
1637        /*
1638         * Finally, enable interrupts: only the RX timeout interrupt
1639         * when using DMA. If the initial RX DMA job failed, start in
1640         * interrupt mode as well.
1641         */
1642        spin_lock_irq(&uap->port.lock);
1643        /* Clear out any spuriously appearing RX interrupts */
1644        writew(UART011_RTIS | UART011_RXIS,
1645               uap->port.membase + UART011_ICR);
1646        uap->im = UART011_RTIM;
1647        if (!pl011_dma_rx_running(uap))
1648                uap->im |= UART011_RXIM;
1649        writew(uap->im, uap->port.membase + UART011_IMSC);
1650        spin_unlock_irq(&uap->port.lock);
1651
1652        return 0;
1653
1654 clk_dis:
1655        clk_disable_unprepare(uap->clk);
1656        return retval;
1657}
1658
1659static void pl011_shutdown_channel(struct uart_amba_port *uap,
1660                                        unsigned int lcrh)
1661{
1662        unsigned long val;
1663
1664        val = readw(uap->port.membase + lcrh);
1665        val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1666        writew(val, uap->port.membase + lcrh);
1667}
1668
1669static void pl011_shutdown(struct uart_port *port)
1670{
1671        struct uart_amba_port *uap =
1672            container_of(port, struct uart_amba_port, port);
1673        unsigned int cr;
1674
1675        /*
1676         * disable all interrupts
1677         */
1678        spin_lock_irq(&uap->port.lock);
1679        uap->im = 0;
1680        writew(uap->im, uap->port.membase + UART011_IMSC);
1681        writew(0xffff, uap->port.membase + UART011_ICR);
1682        spin_unlock_irq(&uap->port.lock);
1683
1684        pl011_dma_shutdown(uap);
1685
1686        /*
1687         * Free the interrupt
1688         */
1689        free_irq(uap->port.irq, uap);
1690
1691        /*
1692         * Disable the port: keep only UARTEN and TXE, but leave the
1693         * current RTS and DTR bits alone.  The full CR is also saved
1694         * in old_cr so that the RTS/DTR state can be restored by the
1695         * next startup().
1696         */
1697        uap->autorts = false;
1698        spin_lock_irq(&uap->port.lock);
1699        cr = readw(uap->port.membase + UART011_CR);
1700        uap->old_cr = cr;
1701        cr &= UART011_CR_RTS | UART011_CR_DTR;
1702        cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1703        writew(cr, uap->port.membase + UART011_CR);
1704        spin_unlock_irq(&uap->port.lock);
1705
1706        /*
1707         * disable break condition and fifos
1708         */
1709        pl011_shutdown_channel(uap, uap->lcrh_rx);
1710        if (uap->lcrh_rx != uap->lcrh_tx)
1711                pl011_shutdown_channel(uap, uap->lcrh_tx);
1712
1713        /*
1714         * Shut down the clock producer
1715         */
1716        clk_disable_unprepare(uap->clk);
1717        /* Optionally let pins go into sleep states */
1718        pinctrl_pm_select_sleep_state(port->dev);
1719
1720        if (dev_get_platdata(uap->port.dev)) {
1721                struct amba_pl011_data *plat;
1722
1723                plat = dev_get_platdata(uap->port.dev);
1724                if (plat->exit)
1725                        plat->exit();
1726        }
1727
1728        if (uap->port.ops->flush_buffer)
1729                uap->port.ops->flush_buffer(port);
1730}
1731
1732static void
1733pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1734                     struct ktermios *old)
1735{
1736        struct uart_amba_port *uap =
1737            container_of(port, struct uart_amba_port, port);
1738        unsigned int lcr_h, old_cr;
1739        unsigned long flags;
1740        unsigned int baud, quot, clkdiv;
1741
1742        if (uap->vendor->oversampling)
1743                clkdiv = 8;
1744        else
1745                clkdiv = 16;
1746
1747        /*
1748         * Ask the core to calculate the divisor for us.
1749         */
1750        baud = uart_get_baud_rate(port, termios, old, 0,
1751                                  port->uartclk / clkdiv);
1752#ifdef CONFIG_DMA_ENGINE
1753        /*
1754         * If not specified, derive the RX DMA polling rate from the baud rate.
1755         */
1756        if (uap->dmarx.auto_poll_rate)
1757                uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
1758#endif
1759
1760        if (baud > port->uartclk/16)
1761                quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
1762        else
1763                quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
1764
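        /*
         * quot is a 16.6 fixed-point divisor: the top bits are written to
         * IBRD and the low 6 bits to FBRD further below.  As a worked
         * example, assuming uartclk = 24 MHz and 115200 baud with the
         * standard x16 oversampling:
         *
         *        quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833
         *        IBRD = 833 >> 6   = 13
         *        FBRD = 833 & 0x3f = 1
         *
         * for an actual rate of 24000000 / (16 * (13 + 1/64)) ~= 115246
         * baud, within 0.04% of the request.
         */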
1765        switch (termios->c_cflag & CSIZE) {
1766        case CS5:
1767                lcr_h = UART01x_LCRH_WLEN_5;
1768                break;
1769        case CS6:
1770                lcr_h = UART01x_LCRH_WLEN_6;
1771                break;
1772        case CS7:
1773                lcr_h = UART01x_LCRH_WLEN_7;
1774                break;
1775        default: /* CS8 */
1776                lcr_h = UART01x_LCRH_WLEN_8;
1777                break;
1778        }
1779        if (termios->c_cflag & CSTOPB)
1780                lcr_h |= UART01x_LCRH_STP2;
1781        if (termios->c_cflag & PARENB) {
1782                lcr_h |= UART01x_LCRH_PEN;
1783                if (!(termios->c_cflag & PARODD))
1784                        lcr_h |= UART01x_LCRH_EPS;
1785        }
1786        if (uap->fifosize > 1)
1787                lcr_h |= UART01x_LCRH_FEN;
1788
1789        spin_lock_irqsave(&port->lock, flags);
1790
1791        /*
1792         * Update the per-port timeout.
1793         */
1794        uart_update_timeout(port, termios->c_cflag, baud);
1795
1796        port->read_status_mask = UART011_DR_OE | 255;
1797        if (termios->c_iflag & INPCK)
1798                port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1799        if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1800                port->read_status_mask |= UART011_DR_BE;
1801
1802        /*
1803         * Characters to ignore
1804         */
1805        port->ignore_status_mask = 0;
1806        if (termios->c_iflag & IGNPAR)
1807                port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1808        if (termios->c_iflag & IGNBRK) {
1809                port->ignore_status_mask |= UART011_DR_BE;
1810                /*
1811                 * If we're ignoring parity and break indicators,
1812                 * ignore overruns too (for real raw support).
1813                 */
1814                if (termios->c_iflag & IGNPAR)
1815                        port->ignore_status_mask |= UART011_DR_OE;
1816        }
1817
1818        /*
1819         * Ignore all characters if CREAD is not set.
1820         */
1821        if ((termios->c_cflag & CREAD) == 0)
1822                port->ignore_status_mask |= UART_DUMMY_DR_RX;
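        /*
         * UART_DUMMY_DR_RX works because the receive path tags every
         * character read from the data register with it, while real DR
         * reads only carry data and error flags in the low bits, so bit
         * 16 can never be set by the hardware.  With CREAD clear, every
         * received character therefore matches ignore_status_mask and
         * is dropped.
         */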
1823
1824        if (UART_ENABLE_MS(port, termios->c_cflag))
1825                pl011_enable_ms(port);
1826
1827        /* first, disable everything */
1828        old_cr = readw(port->membase + UART011_CR);
1829        writew(0, port->membase + UART011_CR);
1830
1831        if (termios->c_cflag & CRTSCTS) {
1832                if (old_cr & UART011_CR_RTS)
1833                        old_cr |= UART011_CR_RTSEN;
1834
1835                old_cr |= UART011_CR_CTSEN;
1836                uap->autorts = true;
1837        } else {
1838                old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
1839                uap->autorts = false;
1840        }
1841
1842        if (uap->vendor->oversampling) {
1843                if (baud > port->uartclk / 16)
1844                        old_cr |= ST_UART011_CR_OVSFACT;
1845                else
1846                        old_cr &= ~ST_UART011_CR_OVSFACT;
1847        }
1848
1849        /*
1850         * Workaround for the ST Micro oversampling variants: increase
1851         * the bitrate slightly, by lowering the divisor, to avoid
1852         * delayed sampling of the start bit at high speeds; otherwise
1853         * we see data corruption.
1854         */
1855        if (uap->vendor->oversampling) {
1856                if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
1857                        quot -= 1;
1858                else if ((baud > 3250000) && (quot > 2))
1859                        quot -= 2;
1860        }
1861        /* Set baud rate */
1862        writew(quot & 0x3f, port->membase + UART011_FBRD);
1863        writew(quot >> 6, port->membase + UART011_IBRD);
1864
1865        /*
1866         * ----------v----------v----------v----------v-----
1867         * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
1868         * UART011_FBRD & UART011_IBRD.
1869         * ----------^----------^----------^----------^-----
1870         */
1871        pl011_write_lcr_h(uap, lcr_h);
1872        writew(old_cr, port->membase + UART011_CR);
1873
1874        spin_unlock_irqrestore(&port->lock, flags);
1875}
1876
1877static const char *pl011_type(struct uart_port *port)
1878{
1879        struct uart_amba_port *uap =
1880            container_of(port, struct uart_amba_port, port);
1881        return uap->port.type == PORT_AMBA ? uap->type : NULL;
1882}
1883
1884/*
1885 * Release the memory region(s) being used by 'port'
1886 */
1887static void pl011_release_port(struct uart_port *port)
1888{
1889        release_mem_region(port->mapbase, SZ_4K);
1890}
1891
1892/*
1893 * Request the memory region(s) being used by 'port'
1894 */
1895static int pl011_request_port(struct uart_port *port)
1896{
1897        return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1898                        != NULL ? 0 : -EBUSY;
1899}
1900
1901/*
1902 * Configure/autoconfigure the port.
1903 */
1904static void pl011_config_port(struct uart_port *port, int flags)
1905{
1906        if (flags & UART_CONFIG_TYPE) {
1907                port->type = PORT_AMBA;
1908                pl011_request_port(port);
1909        }
1910}
1911
1912/*
1913 * verify the new serial_struct (for TIOCSSERIAL).
1914 */
1915static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
1916{
1917        int ret = 0;
1918        if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
1919                ret = -EINVAL;
1920        if (ser->irq < 0 || ser->irq >= nr_irqs)
1921                ret = -EINVAL;
1922        if (ser->baud_base < 9600)
1923                ret = -EINVAL;
1924        return ret;
1925}
1926
1927static struct uart_ops amba_pl011_pops = {
1928        .tx_empty       = pl011_tx_empty,
1929        .set_mctrl      = pl011_set_mctrl,
1930        .get_mctrl      = pl011_get_mctrl,
1931        .stop_tx        = pl011_stop_tx,
1932        .start_tx       = pl011_start_tx,
1933        .stop_rx        = pl011_stop_rx,
1934        .enable_ms      = pl011_enable_ms,
1935        .break_ctl      = pl011_break_ctl,
1936        .startup        = pl011_startup,
1937        .shutdown       = pl011_shutdown,
1938        .flush_buffer   = pl011_dma_flush_buffer,
1939        .set_termios    = pl011_set_termios,
1940        .type           = pl011_type,
1941        .release_port   = pl011_release_port,
1942        .request_port   = pl011_request_port,
1943        .config_port    = pl011_config_port,
1944        .verify_port    = pl011_verify_port,
1945#ifdef CONFIG_CONSOLE_POLL
1946        .poll_init     = pl011_hwinit,
1947        .poll_get_char = pl011_get_poll_char,
1948        .poll_put_char = pl011_put_poll_char,
1949#endif
1950};
1951
1952static struct uart_amba_port *amba_ports[UART_NR];
1953
1954#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
1955
1956static void pl011_console_putchar(struct uart_port *port, int ch)
1957{
1958        struct uart_amba_port *uap =
1959            container_of(port, struct uart_amba_port, port);
1960
1961        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1962                barrier();
1963        writew(ch, uap->port.membase + UART01x_DR);
1964}
1965
1966static void
1967pl011_console_write(struct console *co, const char *s, unsigned int count)
1968{
1969        struct uart_amba_port *uap = amba_ports[co->index];
1970        unsigned int status, old_cr, new_cr;
1971        unsigned long flags;
1972        int locked = 1;
1973
1974        clk_enable(uap->clk);
1975
1976        local_irq_save(flags);
1977        if (uap->port.sysrq)
1978                locked = 0;
1979        else if (oops_in_progress)
1980                locked = spin_trylock(&uap->port.lock);
1981        else
1982                spin_lock(&uap->port.lock);
1983
1984        /*
1985         *      First save the CR, then disable CTS flow control and enable UART and TX
1986         */
1987        old_cr = readw(uap->port.membase + UART011_CR);
1988        new_cr = old_cr & ~UART011_CR_CTSEN;
1989        new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1990        writew(new_cr, uap->port.membase + UART011_CR);
1991
1992        uart_console_write(&uap->port, s, count, pl011_console_putchar);
1993
1994        /*
1995         *      Finally, wait for the transmitter to become empty
1996         *      and restore the CR
1997         */
1998        do {
1999                status = readw(uap->port.membase + UART01x_FR);
2000        } while (status & UART01x_FR_BUSY);
2001        writew(old_cr, uap->port.membase + UART011_CR);
2002
2003        if (locked)
2004                spin_unlock(&uap->port.lock);
2005        local_irq_restore(flags);
2006
2007        clk_disable(uap->clk);
2008}
2009
2010static void __init
2011pl011_console_get_options(struct uart_amba_port *uap, int *baud,
2012                             int *parity, int *bits)
2013{
2014        if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
2015                unsigned int lcr_h, ibrd, fbrd;
2016
2017                lcr_h = readw(uap->port.membase + uap->lcrh_tx);
2018
2019                *parity = 'n';
2020                if (lcr_h & UART01x_LCRH_PEN) {
2021                        if (lcr_h & UART01x_LCRH_EPS)
2022                                *parity = 'e';
2023                        else
2024                                *parity = 'o';
2025                }
2026
2027                if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
2028                        *bits = 7;
2029                else
2030                        *bits = 8;
2031
2032                ibrd = readw(uap->port.membase + UART011_IBRD);
2033                fbrd = readw(uap->port.membase + UART011_FBRD);
2034
2035                *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
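                /*
                 * The computation above inverts the 16.6 divisor split
                 * used in pl011_set_termios(): with x16 oversampling,
                 * baud = uartclk / (16 * (ibrd + fbrd / 64)), which
                 * rearranges to uartclk * 4 / (64 * ibrd + fbrd).
                 */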
2036
2037                if (uap->vendor->oversampling) {
2038                        if (readw(uap->port.membase + UART011_CR)
2039                                  & ST_UART011_CR_OVSFACT)
2040                                *baud *= 2;
2041                }
2042        }
2043}
2044
2045static int __init pl011_console_setup(struct console *co, char *options)
2046{
2047        struct uart_amba_port *uap;
2048        int baud = 38400;
2049        int bits = 8;
2050        int parity = 'n';
2051        int flow = 'n';
2052        int ret;
2053
2054        /*
2055         * Check whether an invalid uart number has been specified, and
2056         * if so, fall back to the first port as the candidate for
2057         * console support.
2058         */
2059        if (co->index >= UART_NR)
2060                co->index = 0;
2061        uap = amba_ports[co->index];
2062        if (!uap)
2063                return -ENODEV;
2064
2065        /* Allow pins to be muxed in and configured */
2066        pinctrl_pm_select_default_state(uap->port.dev);
2067
2068        ret = clk_prepare(uap->clk);
2069        if (ret)
2070                return ret;
2071
2072        if (dev_get_platdata(uap->port.dev)) {
2073                struct amba_pl011_data *plat;
2074
2075                plat = dev_get_platdata(uap->port.dev);
2076                if (plat->init)
2077                        plat->init();
2078        }
2079
2080        uap->port.uartclk = clk_get_rate(uap->clk);
2081
2082        if (options)
2083                uart_parse_options(options, &baud, &parity, &bits, &flow);
2084        else
2085                pl011_console_get_options(uap, &baud, &parity, &bits);
2086
2087        return uart_set_options(&uap->port, co, baud, parity, bits, flow);
2088}
2089
2090static struct uart_driver amba_reg;
2091static struct console amba_console = {
2092        .name           = "ttyAMA",
2093        .write          = pl011_console_write,
2094        .device         = uart_console_device,
2095        .setup          = pl011_console_setup,
2096        .flags          = CON_PRINTBUFFER,
2097        .index          = -1,
2098        .data           = &amba_reg,
2099};
2100
2101#define AMBA_CONSOLE    (&amba_console)
2102
2103static void pl011_putc(struct uart_port *port, int c)
2104{
2105        while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2106                ;
2107        writeb(c, port->membase + UART01x_DR);
2108        while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
2109                ;
2110}
2111
2112static void pl011_early_write(struct console *con, const char *s, unsigned n)
2113{
2114        struct earlycon_device *dev = con->data;
2115
2116        uart_console_write(&dev->port, s, n, pl011_putc);
2117}
2118
2119static int __init pl011_early_console_setup(struct earlycon_device *device,
2120                                            const char *opt)
2121{
2122        if (!device->port.membase)
2123                return -ENODEV;
2124
2125        device->con->write = pl011_early_write;
2126        return 0;
2127}
2128EARLYCON_DECLARE(pl011, pl011_early_console_setup);
2129OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
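/*
 * With the declarations above, the UART can serve as an early boot
 * console via "earlycon=pl011,<mmio-addr>" on the kernel command line
 * (for instance "earlycon=pl011,0x09000000" on QEMU's arm "virt"
 * machine), or via plain "earlycon" when the address comes from the
 * device tree stdout-path.
 */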
2130
2131#else
2132#define AMBA_CONSOLE    NULL
2133#endif
2134
2135static struct uart_driver amba_reg = {
2136        .owner                  = THIS_MODULE,
2137        .driver_name            = "ttyAMA",
2138        .dev_name               = "ttyAMA",
2139        .major                  = SERIAL_AMBA_MAJOR,
2140        .minor                  = SERIAL_AMBA_MINOR,
2141        .nr                     = UART_NR,
2142        .cons                   = AMBA_CONSOLE,
2143};
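/*
 * Ports registered against this driver show up as /dev/ttyAMA<n>, with
 * character device major SERIAL_AMBA_MAJOR (204) and minors starting at
 * SERIAL_AMBA_MINOR (64), so ttyAMA0 is 204:64, ttyAMA1 is 204:65, etc.
 */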
2144
2145static int pl011_probe_dt_alias(int index, struct device *dev)
2146{
2147        struct device_node *np;
2148        static bool seen_dev_with_alias = false;
2149        static bool seen_dev_without_alias = false;
2150        int ret = index;
2151
2152        if (!IS_ENABLED(CONFIG_OF))
2153                return ret;
2154
2155        np = dev->of_node;
2156        if (!np)
2157                return ret;
2158
2159        ret = of_alias_get_id(np, "serial");
2160        if (IS_ERR_VALUE(ret)) {
2161                seen_dev_without_alias = true;
2162                ret = index;
2163        } else {
2164                seen_dev_with_alias = true;
2165                if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2166                        dev_warn(dev, "requested serial port %d not available.\n", ret);
2167                        ret = index;
2168                }
2169        }
2170
2171        if (seen_dev_with_alias && seen_dev_without_alias)
2172                dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2173
2174        return ret;
2175}
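/*
 * A stable line number can be requested from the device tree with a
 * "serial<n>" alias, e.g. (with a hypothetical uart0 label):
 *
 *        aliases {
 *                serial0 = &uart0;
 *        };
 *
 * which pins that port to ttyAMA0 regardless of probe order.
 */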
2176
2177static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2178{
2179        struct uart_amba_port *uap;
2180        struct vendor_data *vendor = id->data;
2181        void __iomem *base;
2182        int i, ret;
2183
2184        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2185                if (amba_ports[i] == NULL)
2186                        break;
2187
2188        if (i == ARRAY_SIZE(amba_ports))
2189                return -EBUSY;
2190
2191        uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2192                           GFP_KERNEL);
2193        if (uap == NULL)
2194                return -ENOMEM;
2195
2196        i = pl011_probe_dt_alias(i, &dev->dev);
2197
2198        base = devm_ioremap(&dev->dev, dev->res.start,
2199                            resource_size(&dev->res));
2200        if (!base)
2201                return -ENOMEM;
2202
2203        uap->clk = devm_clk_get(&dev->dev, NULL);
2204        if (IS_ERR(uap->clk))
2205                return PTR_ERR(uap->clk);
2206
2207        uap->vendor = vendor;
2208        uap->lcrh_rx = vendor->lcrh_rx;
2209        uap->lcrh_tx = vendor->lcrh_tx;
2210        uap->old_cr = 0;
2211        uap->fifosize = vendor->get_fifosize(dev);
2212        uap->port.dev = &dev->dev;
2213        uap->port.mapbase = dev->res.start;
2214        uap->port.membase = base;
2215        uap->port.iotype = UPIO_MEM;
2216        uap->port.irq = dev->irq[0];
2217        uap->port.fifosize = uap->fifosize;
2218        uap->port.ops = &amba_pl011_pops;
2219        uap->port.flags = UPF_BOOT_AUTOCONF;
2220        uap->port.line = i;
2221        pl011_dma_probe(&dev->dev, uap);
2222
2223        /* Ensure interrupts from this UART are masked and cleared */
2224        writew(0, uap->port.membase + UART011_IMSC);
2225        writew(0xffff, uap->port.membase + UART011_ICR);
2226
2227        snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2228
2229        amba_ports[i] = uap;
2230
2231        amba_set_drvdata(dev, uap);
2232
2233        if (!amba_reg.state) {
2234                ret = uart_register_driver(&amba_reg);
2235                if (ret < 0) {
2236                        pr_err("Failed to register AMBA-PL011 driver\n");
2237                        return ret;
2238                }
2239        }
2240
2241        ret = uart_add_one_port(&amba_reg, &uap->port);
2242        if (ret) {
2243                amba_ports[i] = NULL;
2244                uart_unregister_driver(&amba_reg);
2245                pl011_dma_remove(uap);
2246        }
2247
2248        return ret;
2249}
2250
2251static int pl011_remove(struct amba_device *dev)
2252{
2253        struct uart_amba_port *uap = amba_get_drvdata(dev);
2254        bool busy = false;
2255        int i;
2256
2257        uart_remove_one_port(&amba_reg, &uap->port);
2258
2259        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2260                if (amba_ports[i] == uap)
2261                        amba_ports[i] = NULL;
2262                else if (amba_ports[i])
2263                        busy = true;
2264
2265        pl011_dma_remove(uap);
2266        if (!busy)
2267                uart_unregister_driver(&amba_reg);
2268        return 0;
2269}
2270
2271#ifdef CONFIG_PM_SLEEP
2272static int pl011_suspend(struct device *dev)
2273{
2274        struct uart_amba_port *uap = dev_get_drvdata(dev);
2275
2276        if (!uap)
2277                return -EINVAL;
2278
2279        return uart_suspend_port(&amba_reg, &uap->port);
2280}
2281
2282static int pl011_resume(struct device *dev)
2283{
2284        struct uart_amba_port *uap = dev_get_drvdata(dev);
2285
2286        if (!uap)
2287                return -EINVAL;
2288
2289        return uart_resume_port(&amba_reg, &uap->port);
2290}
2291#endif
2292
2293static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2294
2295static struct amba_id pl011_ids[] = {
2296        {
2297                .id     = 0x00041011,
2298                .mask   = 0x000fffff,
2299                .data   = &vendor_arm,
2300        },
2301        {
2302                .id     = 0x00380802,
2303                .mask   = 0x00ffffff,
2304                .data   = &vendor_st,
2305        },
2306        { 0, 0 },
2307};
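/*
 * The id/mask pairs above are matched against the PrimeCell peripheral
 * ID registers that the AMBA bus code reads from the device at probe
 * time: 0x00041011 identifies ARM's PL011, while 0x00380802 is the
 * ST-Ericsson derivative handled by vendor_st.
 */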
2308
2309MODULE_DEVICE_TABLE(amba, pl011_ids);
2310
2311static struct amba_driver pl011_driver = {
2312        .drv = {
2313                .name   = "uart-pl011",
2314                .pm     = &pl011_dev_pm_ops,
2315        },
2316        .id_table       = pl011_ids,
2317        .probe          = pl011_probe,
2318        .remove         = pl011_remove,
2319};
2320
2321static int __init pl011_init(void)
2322{
2323        printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2324
2325        return amba_driver_register(&pl011_driver);
2326}
2327
2328static void __exit pl011_exit(void)
2329{
2330        amba_driver_unregister(&pl011_driver);
2331}
2332
2333/*
2334 * While this can be a module, if built in it's most likely the console,
2335 * so let's leave module_exit() but move module_init() to an earlier place.
2336 */
2337arch_initcall(pl011_init);
2338module_exit(pl011_exit);
2339
2340MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2341MODULE_DESCRIPTION("ARM AMBA serial port driver");
2342MODULE_LICENSE("GPL");
2343