/* linux/drivers/tty/serial/amba-pl011.c */
   1/*
   2 *  Driver for AMBA serial ports
   3 *
   4 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
   5 *
   6 *  Copyright 1999 ARM Limited
   7 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
   8 *  Copyright (C) 2010 ST-Ericsson SA
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; either version 2 of the License, or
  13 * (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  23 *
  24 * This is a generic driver for ARM AMBA-type serial ports.  They
  25 * have a lot of 16550-like features, but are not register compatible.
  26 * Note that although they do have CTS, DCD and DSR inputs, they do
  27 * not have an RI input, nor do they have DTR or RTS outputs.  If
  28 * required, these have to be supplied via some other means (eg, GPIO)
  29 * and hooked into this driver.
  30 */
  31
  32
  33#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
  34#define SUPPORT_SYSRQ
  35#endif
  36
  37#include <linux/module.h>
  38#include <linux/ioport.h>
  39#include <linux/init.h>
  40#include <linux/console.h>
  41#include <linux/sysrq.h>
  42#include <linux/device.h>
  43#include <linux/tty.h>
  44#include <linux/tty_flip.h>
  45#include <linux/serial_core.h>
  46#include <linux/serial.h>
  47#include <linux/amba/bus.h>
  48#include <linux/amba/serial.h>
  49#include <linux/clk.h>
  50#include <linux/slab.h>
  51#include <linux/dmaengine.h>
  52#include <linux/dma-mapping.h>
  53#include <linux/scatterlist.h>
  54#include <linux/delay.h>
  55#include <linux/types.h>
  56#include <linux/of.h>
  57#include <linux/of_device.h>
  58#include <linux/pinctrl/consumer.h>
  59#include <linux/sizes.h>
  60#include <linux/io.h>
  61#include <linux/workqueue.h>
  62
/* Maximum number of ports this driver supports */
#define UART_NR                 14

/* Fixed major/minor numbers for the AMBA serial tty devices */
#define SERIAL_AMBA_MAJOR       204
#define SERIAL_AMBA_MINOR       64
#define SERIAL_AMBA_NR          UART_NR

/* Upper bound on loop iterations inside the interrupt handler */
#define AMBA_ISR_PASS_LIMIT     256

/* All receive-error bits that can accompany a character in the data register */
#define UART_DR_ERROR           (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
/* Out-of-band marker OR'd into received chars: bit 16, above data/error bits */
#define UART_DUMMY_DR_RX        (1 << 16)
  73
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
        unsigned int            ifls;           /* FIFO interrupt-level-select value */
        unsigned int            lcrh_tx;        /* line-control register offset for TX */
        unsigned int            lcrh_rx;        /* line-control register offset for RX */
        bool                    oversampling;   /* vendor supports the oversampling option */
        bool                    dma_threshold;  /* vendor has a DMA burst threshold quirk */
        bool                    cts_event_workaround; /* needs the CTS-event workaround */

        /* Return the FIFO depth (bytes) for this particular device */
        unsigned int (*get_fifosize)(struct amba_device *dev);
};
  85
  86static unsigned int get_fifosize_arm(struct amba_device *dev)
  87{
  88        return amba_rev(dev) < 3 ? 16 : 32;
  89}
  90
/* Vendor data for ARM's own PrimeCell implementation */
static struct vendor_data vendor_arm = {
        .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
        .lcrh_tx                = UART011_LCRH,         /* single shared LCRH register */
        .lcrh_rx                = UART011_LCRH,
        .oversampling           = false,
        .dma_threshold          = false,
        .cts_event_workaround   = false,
        .get_fifosize           = get_fifosize_arm,     /* 16 or 32 bytes by revision */
};
 100
 101static unsigned int get_fifosize_st(struct amba_device *dev)
 102{
 103        return 64;
 104}
 105
/* Vendor data for the ST-Ericsson derivative, with split LCRH registers */
static struct vendor_data vendor_st = {
        .ifls                   = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
        .lcrh_tx                = ST_UART011_LCRH_TX,   /* separate TX line control */
        .lcrh_rx                = ST_UART011_LCRH_RX,   /* separate RX line control */
        .oversampling           = true,
        .dma_threshold          = true,
        .cts_event_workaround   = true,
        .get_fifosize           = get_fifosize_st,      /* always 64 bytes */
};
 115
/* Deals with DMA transactions */

/* One DMA buffer plus the single-entry scatterlist describing it */
struct pl011_sgbuf {
        struct scatterlist sg;  /* scatterlist entry covering buf */
        char *buf;              /* CPU (virtual) address of the buffer */
};
 122
/* State for DMA-driven reception, double-buffered between sgbuf_a/sgbuf_b */
struct pl011_dmarx_data {
        struct dma_chan         *chan;          /* RX DMA channel, NULL if none */
        struct completion       complete;
        bool                    use_buf_b;      /* selects sgbuf_b vs sgbuf_a for next job */
        struct pl011_sgbuf      sgbuf_a;
        struct pl011_sgbuf      sgbuf_b;
        dma_cookie_t            cookie;         /* cookie of the in-flight descriptor */
        bool                    running;        /* an RX DMA job is currently queued */
        struct timer_list       timer;          /* poll timer (when poll_rate is set) */
        unsigned int last_residue;              /* residue seen at last poll, for progress calc */
        unsigned long last_jiffies;
        bool auto_poll_rate;                    /* derive poll_rate from baud at set_termios */
        unsigned int poll_rate;                 /* polling interval in ms; 0 = no polling */
        unsigned int poll_timeout;              /* give-up timeout in ms for polling mode */
};
 138
/* State for DMA-driven transmission */
struct pl011_dmatx_data {
        struct dma_chan         *chan;          /* TX DMA channel, NULL if none */
        struct scatterlist      sg;             /* single-entry list over buf */
        char                    *buf;           /* bounce buffer the xmit ring is copied into */
        bool                    queued;         /* a TX descriptor is currently submitted */
};
 145
/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
        struct uart_port        port;
        struct clk              *clk;           /* bus clock for this UART block */
        const struct vendor_data *vendor;       /* per-vendor register details */
        unsigned int            dmacr;          /* dma control reg */
        unsigned int            im;             /* interrupt mask */
        unsigned int            old_status;     /* NOTE(review): presumably cached modem status — set elsewhere in the file */
        unsigned int            fifosize;       /* vendor-specific */
        unsigned int            lcrh_tx;        /* vendor-specific */
        unsigned int            lcrh_rx;        /* vendor-specific */
        unsigned int            old_cr;         /* state during shutdown */
        struct delayed_work     tx_softirq_work;
        bool                    autorts;        /* automatic RTS flow control enabled */
        unsigned int            tx_irq_seen;    /* 0=none, 1=1, 2=2 or more */
        char                    type[12];       /* port type string for reporting */
#ifdef CONFIG_DMA_ENGINE
        /* DMA stuff */
        bool                    using_tx_dma;
        bool                    using_rx_dma;
        struct pl011_dmarx_data dmarx;
        struct pl011_dmatx_data dmatx;
        bool                    dma_probed;     /* pl011_dma_probe has completed */
#endif
};
 173
 174/*
 175 * Reads up to 256 characters from the FIFO or until it's empty and
 176 * inserts them into the TTY layer. Returns the number of characters
 177 * read from the FIFO.
 178 */
 179static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 180{
 181        u16 status, ch;
 182        unsigned int flag, max_count = 256;
 183        int fifotaken = 0;
 184
 185        while (max_count--) {
 186                status = readw(uap->port.membase + UART01x_FR);
 187                if (status & UART01x_FR_RXFE)
 188                        break;
 189
 190                /* Take chars from the FIFO and update status */
 191                ch = readw(uap->port.membase + UART01x_DR) |
 192                        UART_DUMMY_DR_RX;
 193                flag = TTY_NORMAL;
 194                uap->port.icount.rx++;
 195                fifotaken++;
 196
 197                if (unlikely(ch & UART_DR_ERROR)) {
 198                        if (ch & UART011_DR_BE) {
 199                                ch &= ~(UART011_DR_FE | UART011_DR_PE);
 200                                uap->port.icount.brk++;
 201                                if (uart_handle_break(&uap->port))
 202                                        continue;
 203                        } else if (ch & UART011_DR_PE)
 204                                uap->port.icount.parity++;
 205                        else if (ch & UART011_DR_FE)
 206                                uap->port.icount.frame++;
 207                        if (ch & UART011_DR_OE)
 208                                uap->port.icount.overrun++;
 209
 210                        ch &= uap->port.read_status_mask;
 211
 212                        if (ch & UART011_DR_BE)
 213                                flag = TTY_BREAK;
 214                        else if (ch & UART011_DR_PE)
 215                                flag = TTY_PARITY;
 216                        else if (ch & UART011_DR_FE)
 217                                flag = TTY_FRAME;
 218                }
 219
 220                if (uart_handle_sysrq_char(&uap->port, ch & 255))
 221                        continue;
 222
 223                uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
 224        }
 225
 226        return fifotaken;
 227}
 228
 229
 230/*
 231 * All the DMA operation mode stuff goes inside this ifdef.
 232 * This assumes that you have a generic DMA device interface,
 233 * no custom DMA interfaces are supported.
 234 */
 235#ifdef CONFIG_DMA_ENGINE
 236
 237#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 238
/*
 * Allocate a PL011_DMA_BUFFER_SIZE coherent DMA buffer and describe it
 * with the embedded single-entry scatterlist.
 * Returns 0 on success or -ENOMEM on allocation failure.
 * NOTE(review): @dir is unused here — the buffer is coherent, so no
 * direction-specific mapping is required.
 */
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
        enum dma_data_direction dir)
{
        dma_addr_t dma_addr;

        sg->buf = dma_alloc_coherent(chan->device->dev,
                PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
        if (!sg->buf)
                return -ENOMEM;

        /* Fill in the scatterlist and record the bus address directly,
         * since dma_alloc_coherent() already performed the mapping. */
        sg_init_table(&sg->sg, 1);
        sg_set_page(&sg->sg, phys_to_page(dma_addr),
                PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
        sg_dma_address(&sg->sg) = dma_addr;
        sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

        return 0;
}
 257
 258static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
 259        enum dma_data_direction dir)
 260{
 261        if (sg->buf) {
 262                dma_free_coherent(chan->device->dev,
 263                        PL011_DMA_BUFFER_SIZE, sg->buf,
 264                        sg_dma_address(&sg->sg));
 265        }
 266}
 267
 268static void pl011_dma_probe(struct uart_amba_port *uap)
 269{
 270        /* DMA is the sole user of the platform data right now */
 271        struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
 272        struct device *dev = uap->port.dev;
 273        struct dma_slave_config tx_conf = {
 274                .dst_addr = uap->port.mapbase + UART01x_DR,
 275                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 276                .direction = DMA_MEM_TO_DEV,
 277                .dst_maxburst = uap->fifosize >> 1,
 278                .device_fc = false,
 279        };
 280        struct dma_chan *chan;
 281        dma_cap_mask_t mask;
 282
 283        uap->dma_probed = true;
 284        chan = dma_request_slave_channel_reason(dev, "tx");
 285        if (IS_ERR(chan)) {
 286                if (PTR_ERR(chan) == -EPROBE_DEFER) {
 287                        uap->dma_probed = false;
 288                        return;
 289                }
 290
 291                /* We need platform data */
 292                if (!plat || !plat->dma_filter) {
 293                        dev_info(uap->port.dev, "no DMA platform data\n");
 294                        return;
 295                }
 296
 297                /* Try to acquire a generic DMA engine slave TX channel */
 298                dma_cap_zero(mask);
 299                dma_cap_set(DMA_SLAVE, mask);
 300
 301                chan = dma_request_channel(mask, plat->dma_filter,
 302                                                plat->dma_tx_param);
 303                if (!chan) {
 304                        dev_err(uap->port.dev, "no TX DMA channel!\n");
 305                        return;
 306                }
 307        }
 308
 309        dmaengine_slave_config(chan, &tx_conf);
 310        uap->dmatx.chan = chan;
 311
 312        dev_info(uap->port.dev, "DMA channel TX %s\n",
 313                 dma_chan_name(uap->dmatx.chan));
 314
 315        /* Optionally make use of an RX channel as well */
 316        chan = dma_request_slave_channel(dev, "rx");
 317
 318        if (!chan && plat->dma_rx_param) {
 319                chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
 320
 321                if (!chan) {
 322                        dev_err(uap->port.dev, "no RX DMA channel!\n");
 323                        return;
 324                }
 325        }
 326
 327        if (chan) {
 328                struct dma_slave_config rx_conf = {
 329                        .src_addr = uap->port.mapbase + UART01x_DR,
 330                        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 331                        .direction = DMA_DEV_TO_MEM,
 332                        .src_maxburst = uap->fifosize >> 2,
 333                        .device_fc = false,
 334                };
 335                struct dma_slave_caps caps;
 336
 337                /*
 338                 * Some DMA controllers provide information on their capabilities.
 339                 * If the controller does, check for suitable residue processing
 340                 * otherwise assime all is well.
 341                 */
 342                if (0 == dma_get_slave_caps(chan, &caps)) {
 343                        if (caps.residue_granularity ==
 344                                        DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
 345                                dma_release_channel(chan);
 346                                dev_info(uap->port.dev,
 347                                        "RX DMA disabled - no residue processing\n");
 348                                return;
 349                        }
 350                }
 351                dmaengine_slave_config(chan, &rx_conf);
 352                uap->dmarx.chan = chan;
 353
 354                uap->dmarx.auto_poll_rate = false;
 355                if (plat && plat->dma_rx_poll_enable) {
 356                        /* Set poll rate if specified. */
 357                        if (plat->dma_rx_poll_rate) {
 358                                uap->dmarx.auto_poll_rate = false;
 359                                uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
 360                        } else {
 361                                /*
 362                                 * 100 ms defaults to poll rate if not
 363                                 * specified. This will be adjusted with
 364                                 * the baud rate at set_termios.
 365                                 */
 366                                uap->dmarx.auto_poll_rate = true;
 367                                uap->dmarx.poll_rate =  100;
 368                        }
 369                        /* 3 secs defaults poll_timeout if not specified. */
 370                        if (plat->dma_rx_poll_timeout)
 371                                uap->dmarx.poll_timeout =
 372                                        plat->dma_rx_poll_timeout;
 373                        else
 374                                uap->dmarx.poll_timeout = 3000;
 375                } else if (!plat && dev->of_node) {
 376                        uap->dmarx.auto_poll_rate = of_property_read_bool(
 377                                                dev->of_node, "auto-poll");
 378                        if (uap->dmarx.auto_poll_rate) {
 379                                u32 x;
 380
 381                                if (0 == of_property_read_u32(dev->of_node,
 382                                                "poll-rate-ms", &x))
 383                                        uap->dmarx.poll_rate = x;
 384                                else
 385                                        uap->dmarx.poll_rate = 100;
 386                                if (0 == of_property_read_u32(dev->of_node,
 387                                                "poll-timeout-ms", &x))
 388                                        uap->dmarx.poll_timeout = x;
 389                                else
 390                                        uap->dmarx.poll_timeout = 3000;
 391                        }
 392                }
 393                dev_info(uap->port.dev, "DMA channel RX %s\n",
 394                         dma_chan_name(uap->dmarx.chan));
 395        }
 396}
 397
/* Release any TX/RX DMA channels acquired by pl011_dma_probe(). */
static void pl011_dma_remove(struct uart_amba_port *uap)
{
        if (uap->dmatx.chan)
                dma_release_channel(uap->dmatx.chan);
        if (uap->dmarx.chan)
                dma_release_channel(uap->dmarx.chan);
}
 405
 406/* Forward declare these for the refill routine */
 407static int pl011_dma_tx_refill(struct uart_amba_port *uap);
 408static void pl011_start_tx_pio(struct uart_amba_port *uap);
 409
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 * Runs in DMA-engine callback context and takes the port lock itself.
 */
static void pl011_dma_tx_callback(void *data)
{
        struct uart_amba_port *uap = data;
        struct pl011_dmatx_data *dmatx = &uap->dmatx;
        unsigned long flags;
        u16 dmacr;

        spin_lock_irqsave(&uap->port.lock, flags);
        /* Unmap the buffer of the just-completed transfer, if one was queued */
        if (uap->dmatx.queued)
                dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
                             DMA_TO_DEVICE);

        /* Snapshot dmacr before clearing TXDMAE, so the race check below
         * sees the value as it was when this callback fired */
        dmacr = uap->dmacr;
        uap->dmacr = dmacr & ~UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        /*
         * If TX DMA was disabled, it means that we've stopped the DMA for
         * some reason (eg, XOFF received, or we want to send an X-char.)
         *
         * Note: we need to be careful here of a potential race between DMA
         * and the rest of the driver - if the driver disables TX DMA while
         * a TX buffer completing, we must update the tx queued status to
         * get further refills (hence we check dmacr).
         */
        if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
            uart_circ_empty(&uap->port.state->xmit)) {
                uap->dmatx.queued = false;
                spin_unlock_irqrestore(&uap->port.lock, flags);
                return;
        }

        if (pl011_dma_tx_refill(uap) <= 0)
                /*
                 * We didn't queue a DMA buffer for some reason, but we
                 * have data pending to be sent.  Re-enable the TX IRQ.
                 */
                pl011_start_tx_pio(uap);

        spin_unlock_irqrestore(&uap->port.lock, flags);
}
 455
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
        struct pl011_dmatx_data *dmatx = &uap->dmatx;
        struct dma_chan *chan = dmatx->chan;
        struct dma_device *dma_dev = chan->device;
        struct dma_async_tx_descriptor *desc;
        struct circ_buf *xmit = &uap->port.state->xmit;
        unsigned int count;

        /*
         * Try to avoid the overhead involved in using DMA if the
         * transaction fits in the first half of the FIFO, by using
         * the standard interrupt handling.  This ensures that we
         * issue a uart_write_wakeup() at the appropriate time.
         */
        count = uart_circ_chars_pending(xmit);
        if (count < (uap->fifosize >> 1)) {
                uap->dmatx.queued = false;
                return 0;
        }

        /*
         * Bodge: don't send the last character by DMA, as this
         * will prevent XON from notifying us to restart DMA.
         */
        count -= 1;

        /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
        if (count > PL011_DMA_BUFFER_SIZE)
                count = PL011_DMA_BUFFER_SIZE;

        /* Copy out of the circular buffer, in one or two chunks depending
         * on whether the pending data wraps at the end of the ring */
        if (xmit->tail < xmit->head)
                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
        else {
                size_t first = UART_XMIT_SIZE - xmit->tail;
                size_t second;

                if (first > count)
                        first = count;
                second = count - first;

                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
                if (second)
                        memcpy(&dmatx->buf[first], &xmit->buf[0], second);
        }

        dmatx->sg.length = count;

        if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
                uap->dmatx.queued = false;
                dev_dbg(uap->port.dev, "unable to map TX DMA\n");
                return -EBUSY;
        }

        desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                /*
                 * If DMA cannot be used right now, we complete this
                 * transaction via IRQ and let the TTY layer retry.
                 */
                dev_dbg(uap->port.dev, "TX DMA busy\n");
                return -EBUSY;
        }

        /* Some data to go along to the callback */
        desc->callback = pl011_dma_tx_callback;
        desc->callback_param = uap;

        /* All errors should happen at prepare time */
        dmaengine_submit(desc);

        /* Fire the DMA transaction */
        dma_dev->device_issue_pending(chan);

        /* Enable TX DMA requests from the UART side */
        uap->dmacr |= UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmatx.queued = true;

        /*
         * Now we know that DMA will fire, so advance the ring buffer
         * with the stuff we just dispatched.
         */
        xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
        uap->port.icount.tx += count;

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&uap->port);

        return 1;
}
 557
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
        if (!uap->using_tx_dma)
                return false;

        /*
         * If we already have a TX buffer queued, but received a
         * TX interrupt, it will be because we've just sent an X-char.
         * Ensure the TX DMA is enabled and the TX IRQ is disabled.
         */
        if (uap->dmatx.queued) {
                uap->dmacr |= UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
                uap->im &= ~UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }

        /*
         * We don't have a TX buffer queued, so try to queue one.
         * If we successfully queued a buffer, mask the TX IRQ.
         */
        if (pl011_dma_tx_refill(uap) > 0) {
                uap->im &= ~UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }
        return false;
}
 595
 596/*
 597 * Stop the DMA transmit (eg, due to received XOFF).
 598 * Locking: called with port lock held and IRQs disabled.
 599 */
 600static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
 601{
 602        if (uap->dmatx.queued) {
 603                uap->dmacr &= ~UART011_TXDMAE;
 604                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 605        }
 606}
 607
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
        u16 dmacr;

        if (!uap->using_tx_dma)
                return false;

        if (!uap->port.x_char) {
                /* no X-char, try to push chars out in DMA mode */
                bool ret = true;

                if (!uap->dmatx.queued) {
                        if (pl011_dma_tx_refill(uap) > 0) {
                                /* Buffer queued: mask the TX interrupt */
                                uap->im &= ~UART011_TXIM;
                                writew(uap->im, uap->port.membase +
                                       UART011_IMSC);
                        } else
                                ret = false;
                } else if (!(uap->dmacr & UART011_TXDMAE)) {
                        /* Buffer already queued but DMA disabled: re-enable */
                        uap->dmacr |= UART011_TXDMAE;
                        writew(uap->dmacr,
                                       uap->port.membase + UART011_DMACR);
                }
                return ret;
        }

        /*
         * We have an X-char to send.  Disable DMA to prevent it loading
         * the TX fifo, and then see if we can stuff it into the FIFO.
         */
        dmacr = uap->dmacr;
        uap->dmacr &= ~UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
                /*
                 * No space in the FIFO, so enable the transmit interrupt
                 * so we know when there is space.  Note that once we've
                 * loaded the character, we should just re-enable DMA.
                 */
                return false;
        }

        /* Write the X-char directly into the data register */
        writew(uap->port.x_char, uap->port.membase + UART01x_DR);
        uap->port.icount.tx++;
        uap->port.x_char = 0;

        /* Success - restore the DMA state */
        uap->dmacr = dmacr;
        writew(dmacr, uap->port.membase + UART011_DMACR);

        return true;
}
 669
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        if (!uap->using_tx_dma)
                return;

        /* Avoid deadlock with the DMA engine callback */
        spin_unlock(&uap->port.lock);
        dmaengine_terminate_all(uap->dmatx.chan);
        spin_lock(&uap->port.lock);
        /* Tear down any transfer that was in flight when we terminated */
        if (uap->dmatx.queued) {
                dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
                             DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                uap->dmacr &= ~UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        }
}
 696
 697static void pl011_dma_rx_callback(void *data);
 698
/*
 * Queue an RX DMA transfer into the currently selected sg buffer.
 * Returns 0 on success, -EIO if no RX channel exists, or -EBUSY if
 * the engine could not prepare a descriptor (the caller then falls
 * back to interrupt-driven RX).
 */
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
        struct dma_chan *rxchan = uap->dmarx.chan;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_async_tx_descriptor *desc;
        struct pl011_sgbuf *sgbuf;

        if (!rxchan)
                return -EIO;

        /* Start the RX DMA job */
        sgbuf = uap->dmarx.use_buf_b ?
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
         * If the DMA engine is busy and cannot prepare a
         * channel, no big deal, the driver will fall back
         * to interrupt mode as a result of this error code.
         */
        if (!desc) {
                uap->dmarx.running = false;
                dmaengine_terminate_all(rxchan);
                return -EBUSY;
        }

        /* Some data to go along to the callback */
        desc->callback = pl011_dma_rx_callback;
        desc->callback_param = uap;
        dmarx->cookie = dmaengine_submit(desc);
        dma_async_issue_pending(rxchan);

        /* Route RX through DMA and mark the job as running */
        uap->dmacr |= UART011_RXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmarx.running = true;

        /* RX interrupt no longer needed while DMA is active */
        uap->im &= ~UART011_RXIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);

        return 0;
}
 741
/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 *
 * @pending:   number of bytes the DMA buffer holds for us
 * @use_buf_b: which of the two sg buffers the data is in
 * @readfifo:  whether to also drain the FIFO afterwards
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
                               u32 pending, bool use_buf_b,
                               bool readfifo)
{
        struct tty_port *port = &uap->port.state->port;
        struct pl011_sgbuf *sgbuf = use_buf_b ?
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        int dma_count = 0;
        u32 fifotaken = 0; /* only used for vdbg() */

        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        int dmataken = 0;

        if (uap->dmarx.poll_rate) {
                /* The data can be taken by polling */
                dmataken = sgbuf->sg.length - dmarx->last_residue;
                /* Recalculate the pending size */
                if (pending >= dmataken)
                        pending -= dmataken;
        }

        /* Pick the remain data from the DMA */
        if (pending) {

                /*
                 * First take all chars in the DMA pipe, then look in the FIFO.
                 * Note that tty_insert_flip_buf() tries to take as many chars
                 * as it can.
                 */
                dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
                                pending);

                uap->port.icount.rx += dma_count;
                if (dma_count < pending)
                        dev_warn(uap->port.dev,
                                 "couldn't insert all characters (TTY is full?)\n");
        }

        /* Reset the last_residue for Rx DMA poll */
        if (uap->dmarx.poll_rate)
                dmarx->last_residue = sgbuf->sg.length;

        /*
         * Only continue with trying to read the FIFO if all DMA chars have
         * been taken first.
         */
        if (dma_count == pending && readfifo) {
                /* Clear any error flags */
                writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
                       uap->port.membase + UART011_ICR);

                /*
                 * If we read all the DMA'd characters, and we had an
                 * incomplete buffer, that could be due to an rx error, or
                 * maybe we just timed out. Read any pending chars and check
                 * the error status.
                 *
                 * Error conditions will only occur in the FIFO, these will
                 * trigger an immediate interrupt and stop the DMA job, so we
                 * will always find the error in the FIFO, never in the DMA
                 * buffer.
                 */
                fifotaken = pl011_fifo_to_tty(uap);
        }

        /* Drop the port lock while pushing to the tty layer */
        spin_unlock(&uap->port.lock);
        dev_vdbg(uap->port.dev,
                 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
                 dma_count, fifotaken);
        tty_flip_buffer_push(port);
        spin_lock(&uap->port.lock);
}
 819
/*
 * Receive-timeout path while an RX DMA job is in flight: pause the DMA,
 * read back the residue to learn how much was transferred, hand those
 * bytes to the TTY layer (plus whatever is left in the FIFO), then flip
 * buffers and restart DMA.
 *
 * Called from pl011_int() with the port lock held and IRQs disabled.
 */
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;

	/* Bytes actually DMA'd = buffer length minus reported residue */
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		/* Could not restart DMA: unmask RX IRQ and use PIO instead */
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
 867
/*
 * DMA engine completion callback: the current RX buffer filled up
 * entirely before any receive timeout fired.  Restart DMA on the other
 * buffer first, then drain the completed one into the TTY layer.
 *
 * NOTE(review): uses spin_lock_irq(), which assumes this callback runs
 * with interrupts enabled (DMA completion context) — confirm against the
 * dmaengine provider if changing this.
 */
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	/* Re-arm DMA on the flipped buffer before draining the full one */
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
 915
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 *
 * Clearing RXDMAE only detaches the DMA path; any data already received
 * simply accumulates in the PL011 FIFO.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
 927
/*
 * Timer handler for Rx DMA polling.
 * Every polling, It checks the residue in the dma buffer and transfer
 * data to the tty. Also, last_residue is updated for the next polling.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	/* A residue smaller than last time means new data has arrived */
	if (likely(state.residue < dmarx->last_residue)) {
		/* Offset of the first byte not yet pushed to the tty */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		/*
		 * Only advance last_residue when the tty accepted every
		 * byte; otherwise the leftover is retried next poll.
		 */
		if (dma_count == size)
			dmarx->last_residue =  state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
 980
/*
 * Set up DMA for the port at open time: allocate the TX bounce buffer,
 * map the two RX scatter-gather buffers, enable DMA-on-error in the
 * controller and kick off the first RX DMA job (or the polling timer).
 *
 * Any failure degrades gracefully to PIO; this function cannot fail.
 */
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	/* No TX channel means no DMA at all for this port */
	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		/* Buffer A was mapped above; undo it before bailing out */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * ST Micro variants has some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			/* Poll-mode RX: drive the buffer drain off a timer */
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
1058
/*
 * Tear down DMA at port close: wait for the UART to go idle, disable all
 * DMA enables in the controller, terminate in-flight jobs and release the
 * TX bounce buffer and RX scatter-gather buffers.
 */
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}
1096
/* True if RX DMA buffers were set up successfully for this port. */
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}
1101
/* True if an RX DMA job is currently in flight. */
static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}
1106
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

/* Always fail, so callers stay in interrupt mode */
static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

/* No DMA buffer, so nothing to flush for the uart_ops hook */
#define pl011_dma_flush_buffer	NULL
#endif
1164
/*
 * uart_ops .stop_tx: mask the TX interrupt and halt any TX DMA.
 * Called with the port lock held.
 */
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}
1174
1175static bool pl011_tx_chars(struct uart_amba_port *uap);
1176
/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	/* Unmask TXIM first so a resulting interrupt is not lost */
	uap->im |= UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	/* No TX IRQ observed yet: prime the FIFO by hand */
	if (!uap->tx_irq_seen)
		pl011_tx_chars(uap);
}
1185
1186static void pl011_start_tx(struct uart_port *port)
1187{
1188        struct uart_amba_port *uap =
1189            container_of(port, struct uart_amba_port, port);
1190
1191        if (!pl011_dma_tx_start(uap))
1192                pl011_start_tx_pio(uap);
1193}
1194
/*
 * uart_ops .stop_rx: mask all receive and receive-error interrupts and
 * stop RX DMA.  Called with the port lock held.
 */
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}
1206
/*
 * uart_ops .enable_ms: unmask the modem-status interrupts
 * (RI, CTS, DCD, DSR).  Called with the port lock held.
 */
static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
}
1215
/*
 * PIO receive path: drain the FIFO into the TTY layer, then try to
 * switch back to DMA mode if it is available.
 *
 * Drops and re-takes the port lock around tty_flip_buffer_push(), as
 * annotated by __releases/__acquires below.
 */
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}
1249
1250/*
1251 * Transmit a character
1252 *
1253 * Returns true if the character was successfully queued to the FIFO.
1254 * Returns false otherwise.
1255 */
1256static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
1257{
1258        if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1259                return false; /* unable to transmit character */
1260
1261        writew(c, uap->port.membase + UART01x_DR);
1262        uap->port.icount.tx++;
1263
1264        return true;
1265}
1266
/*
 * Push characters from the TX circular buffer (and any pending x_char)
 * into the UART FIFO.
 *
 * Caller must hold uap->port.lock.  Note that as written, every exit
 * path funnels through "done" and returns false.
 */
static bool pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count;

	if (unlikely(uap->tx_irq_seen < 2))
		/*
		 * Initial FIFO fill level unknown: we must check TXFF
		 * after each write, so just try to fill up the FIFO.
		 */
		count = uap->fifosize;
	else /* tx_irq_seen >= 2 */
		/*
		 * FIFO initially at least half-empty, so we can simply
		 * write half the FIFO without polling TXFF.
		 *
		 * Note: the *first* TX IRQ can still race with
		 * pl011_start_tx_pio(), which can result in the FIFO
		 * being fuller than expected in that case.
		 */
		count = uap->fifosize >> 1;

	/*
	 * If the FIFO is full we're guaranteed a TX IRQ at some later point,
	 * and can't transmit immediately in any case:
	 */
	if (unlikely(uap->tx_irq_seen < 2 &&
		     readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF))
		return false;

	/* A pending XON/XOFF character always goes out first */
	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char))
			goto done;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		goto done;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		goto done;

	while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) {
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		goto done;
	}

	/* Still no TX IRQ observed: keep the softirq fallback armed */
	if (unlikely(!uap->tx_irq_seen))
		schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout);

done:
	return false;
}
1332
/*
 * Report modem-control line changes (DCD/DSR/CTS) to the serial core.
 * Called from the interrupt handler with the port lock held.
 */
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	/* Bits that changed since the last reading */
	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
1356
1357static void pl011_tx_softirq(struct work_struct *work)
1358{
1359        struct delayed_work *dwork = to_delayed_work(work);
1360        struct uart_amba_port *uap =
1361                container_of(dwork, struct uart_amba_port, tx_softirq_work);
1362
1363        spin_lock(&uap->port.lock);
1364        while (pl011_tx_chars(uap)) ;
1365        spin_unlock(&uap->port.lock);
1366}
1367
1368static void pl011_tx_irq_seen(struct uart_amba_port *uap)
1369{
1370        if (likely(uap->tx_irq_seen > 1))
1371                return;
1372
1373        uap->tx_irq_seen++;
1374        if (uap->tx_irq_seen < 2)
1375                /* first TX IRQ */
1376                cancel_delayed_work(&uap->tx_softirq_work);
1377}
1378
/*
 * Top-level interrupt handler: dispatches RX (DMA or PIO), modem-status
 * and TX work from the masked interrupt status, looping until the status
 * clears or AMBA_ISR_PASS_LIMIT iterations have been made.
 */
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;
	unsigned int dummy_read;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			if (uap->vendor->cts_event_workaround) {
				/* workaround to make sure that all bits are unlocked.. */
				writew(0x00, uap->port.membase + UART011_ICR);

				/*
				 * WA: introduce 26ns(1 uart clk) delay before W1C;
				 * single apb access will incur 2 pclk(133.12Mhz) delay,
				 * so add 2 dummy reads
				 */
				dummy_read = readw(uap->port.membase + UART011_ICR);
				dummy_read = readw(uap->port.membase + UART011_ICR);
			}

			/* Ack everything except TX/RX/timeout, which the
			 * sub-handlers clear by servicing the FIFOs */
			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS) {
				pl011_tx_irq_seen(uap);
				pl011_tx_chars(uap);
			}

			/* Bound the loop so a stuck source can't wedge us */
			if (pass_counter-- == 0)
				break;

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
1434
1435static unsigned int pl011_tx_empty(struct uart_port *port)
1436{
1437        struct uart_amba_port *uap =
1438            container_of(port, struct uart_amba_port, port);
1439        unsigned int status = readw(uap->port.membase + UART01x_FR);
1440        return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1441}
1442
1443static unsigned int pl011_get_mctrl(struct uart_port *port)
1444{
1445        struct uart_amba_port *uap =
1446            container_of(port, struct uart_amba_port, port);
1447        unsigned int result = 0;
1448        unsigned int status = readw(uap->port.membase + UART01x_FR);
1449
1450#define TIOCMBIT(uartbit, tiocmbit)     \
1451        if (status & uartbit)           \
1452                result |= tiocmbit
1453
1454        TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1455        TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1456        TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1457        TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1458#undef TIOCMBIT
1459        return result;
1460}
1461
1462static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1463{
1464        struct uart_amba_port *uap =
1465            container_of(port, struct uart_amba_port, port);
1466        unsigned int cr;
1467
1468        cr = readw(uap->port.membase + UART011_CR);
1469
1470#define TIOCMBIT(tiocmbit, uartbit)             \
1471        if (mctrl & tiocmbit)           \
1472                cr |= uartbit;          \
1473        else                            \
1474                cr &= ~uartbit
1475
1476        TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1477        TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1478        TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1479        TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1480        TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1481
1482        if (uap->autorts) {
1483                /* We need to disable auto-RTS if we want to turn RTS off */
1484                TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1485        }
1486#undef TIOCMBIT
1487
1488        writew(cr, uap->port.membase + UART011_CR);
1489}
1490
/*
 * uart_ops .break_ctl: assert (break_state == -1) or deassert the break
 * condition via the BRK bit in the TX line-control register.
 */
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
1507
1508#ifdef CONFIG_CONSOLE_POLL
1509
/*
 * Silence the UART's interrupt sources for polled (e.g. kgdb/KDB) use:
 * acknowledge everything currently pending and mask the TX interrupt.
 */
static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned char __iomem *regs = uap->port.membase;

	/* W1C: ack exactly the interrupts currently asserted */
	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}
1532
/*
 * Console-poll read: return one received character, or NO_POLL_CHAR
 * when the RX FIFO is empty.  Non-blocking.
 */
static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}
1551
/*
 * Console-poll write: busy-wait for space in the TX FIFO, then queue
 * the character.
 */
static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Spin until the TX FIFO has room */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}
1563
1564#endif /* CONFIG_CONSOLE_POLL */
1565
/*
 * Basic hardware initialisation shared by startup and console-poll init:
 * select pinctrl state, enable the clock, clear stale error/RX
 * interrupts and run any platform init hook.
 *
 * Returns 0 on success or a negative errno; the only failure point is
 * clk_prepare_enable(), so on error the clock is left disabled.
 */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionaly enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Save interrupts enable mask, and enable RX interrupts in case if
	 * the interrupt is used for NMI entry.
	 */
	uap->im = readw(uap->port.membase + UART011_IMSC);
	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}
1604
/*
 * Write the line-control value to the RX LCRH register, and — on
 * variants with split RX/TX LCRH registers — also to the TX one, after
 * the hardware-required settling delay.
 */
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	writew(lcr_h, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	}
}
1619
1620static int pl011_startup(struct uart_port *port)
1621{
1622        struct uart_amba_port *uap =
1623            container_of(port, struct uart_amba_port, port);
1624        unsigned int cr;
1625        int retval;
1626
1627        retval = pl011_hwinit(port);
1628        if (retval)
1629                goto clk_dis;
1630
1631        writew(uap->im, uap->port.membase + UART011_IMSC);
1632
1633        /*
1634         * Allocate the IRQ
1635         */
1636        retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
1637        if (retval)
1638                goto clk_dis;
1639
1640        writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
1641
1642        /* Assume that TX IRQ doesn't work until we see one: */
1643        uap->tx_irq_seen = 0;
1644
1645        spin_lock_irq(&uap->port.lock);
1646
1647        /* restore RTS and DTR */
1648        cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1649        cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1650        writew(cr, uap->port.membase + UART011_CR);
1651
1652        spin_unlock_irq(&uap->port.lock);
1653
1654        /*
1655         * initialise the old status of the modem signals
1656         */
1657        uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1658
1659        /* Startup DMA */
1660        pl011_dma_startup(uap);
1661
1662        /*
1663         * Finally, enable interrupts, only timeouts when using DMA
1664         * if initial RX DMA job failed, start in interrupt mode
1665         * as well.
1666         */
1667        spin_lock_irq(&uap->port.lock);
1668        /* Clear out any spuriously appearing RX interrupts */
1669         writew(UART011_RTIS | UART011_RXIS,
1670                uap->port.membase + UART011_ICR);
1671        uap->im = UART011_RTIM;
1672        if (!pl011_dma_rx_running(uap))
1673                uap->im |= UART011_RXIM;
1674        writew(uap->im, uap->port.membase + UART011_IMSC);
1675        spin_unlock_irq(&uap->port.lock);
1676
1677        return 0;
1678
1679 clk_dis:
1680        clk_disable_unprepare(uap->clk);
1681        return retval;
1682}
1683
1684static void pl011_shutdown_channel(struct uart_amba_port *uap,
1685                                        unsigned int lcrh)
1686{
1687      unsigned long val;
1688
1689      val = readw(uap->port.membase + lcrh);
1690      val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1691      writew(val, uap->port.membase + lcrh);
1692}
1693
/*
 * Reverse of pl011_startup(): mask and clear interrupts, stop DMA,
 * free the IRQ, quiesce the port and drop the clock reference.
 */
static void pl011_shutdown(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int cr;

        /* Stop any deferred TX work before tearing the port down */
        cancel_delayed_work_sync(&uap->tx_softirq_work);

        /*
         * disable all interrupts
         */
        spin_lock_irq(&uap->port.lock);
        uap->im = 0;
        writew(uap->im, uap->port.membase + UART011_IMSC);
        writew(0xffff, uap->port.membase + UART011_ICR);
        spin_unlock_irq(&uap->port.lock);

        pl011_dma_shutdown(uap);

        /*
         * Free the interrupt
         */
        free_irq(uap->port.irq, uap);

        /*
         * Disable the port, but keep the UART and TX enabled.
         * The RTS and DTR state is saved in uap->old_cr so that
         * startup() can restore it.
         */
        uap->autorts = false;
        spin_lock_irq(&uap->port.lock);
        cr = readw(uap->port.membase + UART011_CR);
        uap->old_cr = cr;
        cr &= UART011_CR_RTS | UART011_CR_DTR;
        cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
        writew(cr, uap->port.membase + UART011_CR);
        spin_unlock_irq(&uap->port.lock);

        /*
         * disable break condition and fifos
         */
        pl011_shutdown_channel(uap, uap->lcrh_rx);
        if (uap->lcrh_rx != uap->lcrh_tx)
                pl011_shutdown_channel(uap, uap->lcrh_tx);

        /*
         * Shut down the clock producer
         */
        clk_disable_unprepare(uap->clk);
        /* Optionally let pins go into sleep states */
        pinctrl_pm_select_sleep_state(port->dev);

        /* Give platform code a chance to undo its plat->init() hook */
        if (dev_get_platdata(uap->port.dev)) {
                struct amba_pl011_data *plat;

                plat = dev_get_platdata(uap->port.dev);
                if (plat->exit)
                        plat->exit();
        }

        if (uap->port.ops->flush_buffer)
                uap->port.ops->flush_buffer(port);
}
1758
/*
 * Apply new terminal settings: compute the baud divisor and line
 * format, then program the UART.  Register write order matters on
 * this hardware: IBRD/FBRD must be written before LCR_H, and the
 * control register is restored last.
 */
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
                     struct ktermios *old)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int lcr_h, old_cr;
        unsigned long flags;
        unsigned int baud, quot, clkdiv;

        /* Oversampling-capable vendors can run up to uartclk/8 */
        if (uap->vendor->oversampling)
                clkdiv = 8;
        else
                clkdiv = 16;

        /*
         * Ask the core to calculate the divisor for us.
         */
        baud = uart_get_baud_rate(port, termios, old, 0,
                                  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
        /*
         * Adjust RX DMA polling rate with baud rate if not specified.
         */
        if (uap->dmarx.auto_poll_rate)
                uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

        /*
         * quot is the combined divisor, (IBRD << 6) | FBRD; when the
         * baud rate needs oversampling by 8 the effective divisor is
         * halved (multiplier 8 instead of 4).
         */
        if (baud > port->uartclk/16)
                quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
        else
                quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

        switch (termios->c_cflag & CSIZE) {
        case CS5:
                lcr_h = UART01x_LCRH_WLEN_5;
                break;
        case CS6:
                lcr_h = UART01x_LCRH_WLEN_6;
                break;
        case CS7:
                lcr_h = UART01x_LCRH_WLEN_7;
                break;
        default: /* CS8 */
                lcr_h = UART01x_LCRH_WLEN_8;
                break;
        }
        if (termios->c_cflag & CSTOPB)
                lcr_h |= UART01x_LCRH_STP2;
        if (termios->c_cflag & PARENB) {
                lcr_h |= UART01x_LCRH_PEN;
                if (!(termios->c_cflag & PARODD))
                        lcr_h |= UART01x_LCRH_EPS;
        }
        /* Only enable the FIFOs if the hardware actually has them */
        if (uap->fifosize > 1)
                lcr_h |= UART01x_LCRH_FEN;

        spin_lock_irqsave(&port->lock, flags);

        /*
         * Update the per-port timeout.
         */
        uart_update_timeout(port, termios->c_cflag, baud);

        port->read_status_mask = UART011_DR_OE | 255;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
        if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART011_DR_BE;

        /*
         * Characters to ignore
         */
        port->ignore_status_mask = 0;
        if (termios->c_iflag & IGNPAR)
                port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
        if (termios->c_iflag & IGNBRK) {
                port->ignore_status_mask |= UART011_DR_BE;
                /*
                 * If we're ignoring parity and break indicators,
                 * ignore overruns too (for real raw support).
                 */
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |= UART011_DR_OE;
        }

        /*
         * Ignore all characters if CREAD is not set.
         */
        if ((termios->c_cflag & CREAD) == 0)
                port->ignore_status_mask |= UART_DUMMY_DR_RX;

        if (UART_ENABLE_MS(port, termios->c_cflag))
                pl011_enable_ms(port);

        /* first, disable everything */
        old_cr = readw(port->membase + UART011_CR);
        writew(0, port->membase + UART011_CR);

        if (termios->c_cflag & CRTSCTS) {
                if (old_cr & UART011_CR_RTS)
                        old_cr |= UART011_CR_RTSEN;

                old_cr |= UART011_CR_CTSEN;
                uap->autorts = true;
        } else {
                old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
                uap->autorts = false;
        }

        if (uap->vendor->oversampling) {
                if (baud > port->uartclk / 16)
                        old_cr |= ST_UART011_CR_OVSFACT;
                else
                        old_cr &= ~ST_UART011_CR_OVSFACT;
        }

        /*
         * Workaround for the ST Micro oversampling variants to
         * increase the bitrate slightly, by lowering the divisor,
         * to avoid delayed sampling of start bit at high speeds,
         * else we see data corruption.
         */
        if (uap->vendor->oversampling) {
                if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
                        quot -= 1;
                else if ((baud > 3250000) && (quot > 2))
                        quot -= 2;
        }
        /* Set baud rate: fractional part first, then integer part */
        writew(quot & 0x3f, port->membase + UART011_FBRD);
        writew(quot >> 6, port->membase + UART011_IBRD);

        /*
         * ----------v----------v----------v----------v-----
         * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
         * UART011_FBRD & UART011_IBRD.
         * ----------^----------^----------^----------^-----
         */
        pl011_write_lcr_h(uap, lcr_h);
        writew(old_cr, port->membase + UART011_CR);

        spin_unlock_irqrestore(&port->lock, flags);
}
1903
1904static const char *pl011_type(struct uart_port *port)
1905{
1906        struct uart_amba_port *uap =
1907            container_of(port, struct uart_amba_port, port);
1908        return uap->port.type == PORT_AMBA ? uap->type : NULL;
1909}
1910
1911/*
1912 * Release the memory region(s) being used by 'port'
1913 */
1914static void pl011_release_port(struct uart_port *port)
1915{
1916        release_mem_region(port->mapbase, SZ_4K);
1917}
1918
1919/*
1920 * Request the memory region(s) being used by 'port'
1921 */
1922static int pl011_request_port(struct uart_port *port)
1923{
1924        return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1925                        != NULL ? 0 : -EBUSY;
1926}
1927
1928/*
1929 * Configure/autoconfigure the port.
1930 */
1931static void pl011_config_port(struct uart_port *port, int flags)
1932{
1933        if (flags & UART_CONFIG_TYPE) {
1934                port->type = PORT_AMBA;
1935                pl011_request_port(port);
1936        }
1937}
1938
1939/*
1940 * verify the new serial_struct (for TIOCSSERIAL).
1941 */
1942static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
1943{
1944        int ret = 0;
1945        if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
1946                ret = -EINVAL;
1947        if (ser->irq < 0 || ser->irq >= nr_irqs)
1948                ret = -EINVAL;
1949        if (ser->baud_base < 9600)
1950                ret = -EINVAL;
1951        return ret;
1952}
1953
1954static struct uart_ops amba_pl011_pops = {
1955        .tx_empty       = pl011_tx_empty,
1956        .set_mctrl      = pl011_set_mctrl,
1957        .get_mctrl      = pl011_get_mctrl,
1958        .stop_tx        = pl011_stop_tx,
1959        .start_tx       = pl011_start_tx,
1960        .stop_rx        = pl011_stop_rx,
1961        .enable_ms      = pl011_enable_ms,
1962        .break_ctl      = pl011_break_ctl,
1963        .startup        = pl011_startup,
1964        .shutdown       = pl011_shutdown,
1965        .flush_buffer   = pl011_dma_flush_buffer,
1966        .set_termios    = pl011_set_termios,
1967        .type           = pl011_type,
1968        .release_port   = pl011_release_port,
1969        .request_port   = pl011_request_port,
1970        .config_port    = pl011_config_port,
1971        .verify_port    = pl011_verify_port,
1972#ifdef CONFIG_CONSOLE_POLL
1973        .poll_init     = pl011_hwinit,
1974        .poll_get_char = pl011_get_poll_char,
1975        .poll_put_char = pl011_put_poll_char,
1976#endif
1977};
1978
1979static struct uart_amba_port *amba_ports[UART_NR];
1980
1981#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
1982
1983static void pl011_console_putchar(struct uart_port *port, int ch)
1984{
1985        struct uart_amba_port *uap =
1986            container_of(port, struct uart_amba_port, port);
1987
1988        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1989                barrier();
1990        writew(ch, uap->port.membase + UART01x_DR);
1991}
1992
/*
 * Console write callback: polls the string out through the TX FIFO.
 *
 * The port lock is skipped when a sysrq is in flight (the interrupt
 * handler already holds it) and only trylocked during an oops, so a
 * crashing CPU that held the lock cannot deadlock console output.
 */
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
        struct uart_amba_port *uap = amba_ports[co->index];
        unsigned int status, old_cr, new_cr;
        unsigned long flags;
        int locked = 1;

        clk_enable(uap->clk);

        local_irq_save(flags);
        if (uap->port.sysrq)
                locked = 0;
        else if (oops_in_progress)
                locked = spin_trylock(&uap->port.lock);
        else
                spin_lock(&uap->port.lock);

        /*
         * First save the CR, then force the UART and TX on with
         * hardware CTS flow control disabled for the duration.
         */
        old_cr = readw(uap->port.membase + UART011_CR);
        new_cr = old_cr & ~UART011_CR_CTSEN;
        new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
        writew(new_cr, uap->port.membase + UART011_CR);

        uart_console_write(&uap->port, s, count, pl011_console_putchar);

        /*
         *      Finally, wait for transmitter to become empty
         *      and restore the TCR
         */
        do {
                status = readw(uap->port.membase + UART01x_FR);
        } while (status & UART01x_FR_BUSY);
        writew(old_cr, uap->port.membase + UART011_CR);

        if (locked)
                spin_unlock(&uap->port.lock);
        local_irq_restore(flags);

        clk_disable(uap->clk);
}
2036
/*
 * Derive baud/parity/word-length from the current hardware state,
 * used when no console options were given on the command line and a
 * bootloader already enabled the UART.  Outputs are left untouched if
 * the UART is disabled.
 */
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
                             int *parity, int *bits)
{
        if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
                unsigned int lcr_h, ibrd, fbrd;

                lcr_h = readw(uap->port.membase + uap->lcrh_tx);

                *parity = 'n';
                if (lcr_h & UART01x_LCRH_PEN) {
                        if (lcr_h & UART01x_LCRH_EPS)
                                *parity = 'e';
                        else
                                *parity = 'o';
                }

                /* 0x60 masks the two word-length bits of LCR_H */
                if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
                        *bits = 7;
                else
                        *bits = 8;

                ibrd = readw(uap->port.membase + UART011_IBRD);
                fbrd = readw(uap->port.membase + UART011_FBRD);

                /* Invert the divisor formula used by set_termios() */
                *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

                if (uap->vendor->oversampling) {
                        /* OVSFACT halves the divisor, doubling the rate */
                        if (readw(uap->port.membase + UART011_CR)
                                  & ST_UART011_CR_OVSFACT)
                                *baud *= 2;
                }
        }
}
2071
/*
 * Console setup callback: pick the port, prepare its clock and apply
 * either the user-supplied options or whatever the hardware is
 * already configured for.
 */
static int __init pl011_console_setup(struct console *co, char *options)
{
        struct uart_amba_port *uap;
        int baud = 38400;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';
        int ret;

        /*
         * Check whether an invalid uart number has been specified, and
         * if so, search for the first available port that does have
         * console support.
         */
        if (co->index >= UART_NR)
                co->index = 0;
        uap = amba_ports[co->index];
        if (!uap)
                return -ENODEV;

        /* Allow pins to be muxed in and configured */
        pinctrl_pm_select_default_state(uap->port.dev);

        ret = clk_prepare(uap->clk);
        if (ret)
                return ret;

        /* Run any board-specific init hook first */
        if (dev_get_platdata(uap->port.dev)) {
                struct amba_pl011_data *plat;

                plat = dev_get_platdata(uap->port.dev);
                if (plat->init)
                        plat->init();
        }

        uap->port.uartclk = clk_get_rate(uap->clk);

        if (options)
                uart_parse_options(options, &baud, &parity, &bits, &flow);
        else
                pl011_console_get_options(uap, &baud, &parity, &bits);

        return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
2116
/* Forward declaration: the console and driver reference each other. */
static struct uart_driver amba_reg;
static struct console amba_console = {
        .name           = "ttyAMA",
        .write          = pl011_console_write,
        .device         = uart_console_device,
        .setup          = pl011_console_setup,
        .flags          = CON_PRINTBUFFER,
        .index          = -1,           /* -1: first usable port */
        .data           = &amba_reg,
};

#define AMBA_CONSOLE    (&amba_console)
2129
2130static void pl011_putc(struct uart_port *port, int c)
2131{
2132        while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2133                ;
2134        writeb(c, port->membase + UART01x_DR);
2135        while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
2136                ;
2137}
2138
2139static void pl011_early_write(struct console *con, const char *s, unsigned n)
2140{
2141        struct earlycon_device *dev = con->data;
2142
2143        uart_console_write(&dev->port, s, n, pl011_putc);
2144}
2145
2146static int __init pl011_early_console_setup(struct earlycon_device *device,
2147                                            const char *opt)
2148{
2149        if (!device->port.membase)
2150                return -ENODEV;
2151
2152        device->con->write = pl011_early_write;
2153        return 0;
2154}
2155EARLYCON_DECLARE(pl011, pl011_early_console_setup);
2156OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2157
2158#else
2159#define AMBA_CONSOLE    NULL
2160#endif
2161
/* tty driver shared by all PL011 ports (ttyAMA0..ttyAMA{UART_NR-1}) */
static struct uart_driver amba_reg = {
        .owner                  = THIS_MODULE,
        .driver_name            = "ttyAMA",
        .dev_name               = "ttyAMA",
        .major                  = SERIAL_AMBA_MAJOR,
        .minor                  = SERIAL_AMBA_MINOR,
        .nr                     = UART_NR,
        .cons                   = AMBA_CONSOLE,
};
2171
2172static int pl011_probe_dt_alias(int index, struct device *dev)
2173{
2174        struct device_node *np;
2175        static bool seen_dev_with_alias = false;
2176        static bool seen_dev_without_alias = false;
2177        int ret = index;
2178
2179        if (!IS_ENABLED(CONFIG_OF))
2180                return ret;
2181
2182        np = dev->of_node;
2183        if (!np)
2184                return ret;
2185
2186        ret = of_alias_get_id(np, "serial");
2187        if (IS_ERR_VALUE(ret)) {
2188                seen_dev_without_alias = true;
2189                ret = index;
2190        } else {
2191                seen_dev_with_alias = true;
2192                if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2193                        dev_warn(dev, "requested serial port %d  not available.\n", ret);
2194                        ret = index;
2195                }
2196        }
2197
2198        if (seen_dev_with_alias && seen_dev_without_alias)
2199                dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2200
2201        return ret;
2202}
2203
2204static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2205{
2206        struct uart_amba_port *uap;
2207        struct vendor_data *vendor = id->data;
2208        void __iomem *base;
2209        int i, ret;
2210
2211        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2212                if (amba_ports[i] == NULL)
2213                        break;
2214
2215        if (i == ARRAY_SIZE(amba_ports))
2216                return -EBUSY;
2217
2218        uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2219                           GFP_KERNEL);
2220        if (uap == NULL)
2221                return -ENOMEM;
2222
2223        i = pl011_probe_dt_alias(i, &dev->dev);
2224
2225        base = devm_ioremap(&dev->dev, dev->res.start,
2226                            resource_size(&dev->res));
2227        if (!base)
2228                return -ENOMEM;
2229
2230        uap->clk = devm_clk_get(&dev->dev, NULL);
2231        if (IS_ERR(uap->clk))
2232                return PTR_ERR(uap->clk);
2233
2234        uap->vendor = vendor;
2235        uap->lcrh_rx = vendor->lcrh_rx;
2236        uap->lcrh_tx = vendor->lcrh_tx;
2237        uap->old_cr = 0;
2238        uap->fifosize = vendor->get_fifosize(dev);
2239        uap->port.dev = &dev->dev;
2240        uap->port.mapbase = dev->res.start;
2241        uap->port.membase = base;
2242        uap->port.iotype = UPIO_MEM;
2243        uap->port.irq = dev->irq[0];
2244        uap->port.fifosize = uap->fifosize;
2245        uap->port.ops = &amba_pl011_pops;
2246        uap->port.flags = UPF_BOOT_AUTOCONF;
2247        uap->port.line = i;
2248        INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq);
2249
2250        /* Ensure interrupts from this UART are masked and cleared */
2251        writew(0, uap->port.membase + UART011_IMSC);
2252        writew(0xffff, uap->port.membase + UART011_ICR);
2253
2254        snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2255
2256        amba_ports[i] = uap;
2257
2258        amba_set_drvdata(dev, uap);
2259
2260        if (!amba_reg.state) {
2261                ret = uart_register_driver(&amba_reg);
2262                if (ret < 0) {
2263                        dev_err(&dev->dev,
2264                                "Failed to register AMBA-PL011 driver\n");
2265                        return ret;
2266                }
2267        }
2268
2269        ret = uart_add_one_port(&amba_reg, &uap->port);
2270        if (ret) {
2271                amba_ports[i] = NULL;
2272                uart_unregister_driver(&amba_reg);
2273        }
2274
2275        return ret;
2276}
2277
2278static int pl011_remove(struct amba_device *dev)
2279{
2280        struct uart_amba_port *uap = amba_get_drvdata(dev);
2281        bool busy = false;
2282        int i;
2283
2284        uart_remove_one_port(&amba_reg, &uap->port);
2285
2286        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2287                if (amba_ports[i] == uap)
2288                        amba_ports[i] = NULL;
2289                else if (amba_ports[i])
2290                        busy = true;
2291
2292        pl011_dma_remove(uap);
2293        if (!busy)
2294                uart_unregister_driver(&amba_reg);
2295        return 0;
2296}
2297
2298#ifdef CONFIG_PM_SLEEP
2299static int pl011_suspend(struct device *dev)
2300{
2301        struct uart_amba_port *uap = dev_get_drvdata(dev);
2302
2303        if (!uap)
2304                return -EINVAL;
2305
2306        return uart_suspend_port(&amba_reg, &uap->port);
2307}
2308
2309static int pl011_resume(struct device *dev)
2310{
2311        struct uart_amba_port *uap = dev_get_drvdata(dev);
2312
2313        if (!uap)
2314                return -EINVAL;
2315
2316        return uart_resume_port(&amba_reg, &uap->port);
2317}
2318#endif
2319
2320static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2321
static struct amba_id pl011_ids[] = {
        {
                /* Classic ARM PL011 (vendor_arm register layout) */
                .id     = 0x00041011,
                .mask   = 0x000fffff,
                .data   = &vendor_arm,
        },
        {
                /* ST-Ericsson derivative (vendor_st register layout) */
                .id     = 0x00380802,
                .mask   = 0x00ffffff,
                .data   = &vendor_st,
        },
        { 0, 0 },       /* sentinel */
};
2335
2336MODULE_DEVICE_TABLE(amba, pl011_ids);
2337
/* AMBA bus glue binding the id table to probe/remove and PM ops */
static struct amba_driver pl011_driver = {
        .drv = {
                .name   = "uart-pl011",
                .pm     = &pl011_dev_pm_ops,
        },
        .id_table       = pl011_ids,
        .probe          = pl011_probe,
        .remove         = pl011_remove,
};
2347
2348static int __init pl011_init(void)
2349{
2350        printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2351
2352        return amba_driver_register(&pl011_driver);
2353}
2354
/* Module unload: detach the driver from the AMBA bus. */
static void __exit pl011_exit(void)
{
        amba_driver_unregister(&pl011_driver);
}
2359
2360/*
2361 * While this can be a module, if builtin it's most likely the console
2362 * So let's leave module_exit but move module_init to an earlier place
2363 */
2364arch_initcall(pl011_init);
2365module_exit(pl011_exit);
2366
2367MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2368MODULE_DESCRIPTION("ARM AMBA serial port driver");
2369MODULE_LICENSE("GPL");
2370