linux/drivers/tty/serial/amba-pl011.c
   1/*
   2 *  Driver for AMBA serial ports
   3 *
   4 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
   5 *
   6 *  Copyright 1999 ARM Limited
   7 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
   8 *  Copyright (C) 2010 ST-Ericsson SA
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; either version 2 of the License, or
  13 * (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  23 *
  24 * This is a generic driver for ARM AMBA-type serial ports.  They
  25 * have a lot of 16550-like features, but are not register compatible.
  26 * Note that although they do have CTS, DCD and DSR inputs, they do
  27 * not have an RI input, nor do they have DTR or RTS outputs.  If
  28 * required, these have to be supplied via some other means (eg, GPIO)
  29 * and hooked into this driver.
  30 */
  31
  32#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
  33#define SUPPORT_SYSRQ
  34#endif
  35
  36#include <linux/module.h>
  37#include <linux/ioport.h>
  38#include <linux/init.h>
  39#include <linux/console.h>
  40#include <linux/sysrq.h>
  41#include <linux/device.h>
  42#include <linux/tty.h>
  43#include <linux/tty_flip.h>
  44#include <linux/serial_core.h>
  45#include <linux/serial.h>
  46#include <linux/amba/bus.h>
  47#include <linux/amba/serial.h>
  48#include <linux/clk.h>
  49#include <linux/slab.h>
  50#include <linux/dmaengine.h>
  51#include <linux/dma-mapping.h>
  52#include <linux/scatterlist.h>
  53#include <linux/delay.h>
  54#include <linux/types.h>
  55#include <linux/pinctrl/consumer.h>
  56
  57#include <asm/io.h>
  58#include <asm/sizes.h>
  59
  60#define UART_NR                 14
  61
  62#define SERIAL_AMBA_MAJOR       204
  63#define SERIAL_AMBA_MINOR       64
  64#define SERIAL_AMBA_NR          UART_NR
  65
  66#define AMBA_ISR_PASS_LIMIT     256
  67
  68#define UART_DR_ERROR           (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
  69#define UART_DUMMY_DR_RX        (1 << 16)
  70
  71/* There is by now at least one vendor with differing details, so handle it */
  72struct vendor_data {
  73        unsigned int            ifls;
  74        unsigned int            fifosize;
  75        unsigned int            lcrh_tx;
  76        unsigned int            lcrh_rx;
  77        bool                    oversampling;
  78        bool                    interrupt_may_hang;   /* vendor-specific */
  79        bool                    dma_threshold;
  80        bool                    cts_event_workaround;
  81};
  82
  83static struct vendor_data vendor_arm = {
  84        .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
  85        .fifosize               = 16,
  86        .lcrh_tx                = UART011_LCRH,
  87        .lcrh_rx                = UART011_LCRH,
  88        .oversampling           = false,
  89        .dma_threshold          = false,
  90        .cts_event_workaround   = false,
  91};
  92
  93static struct vendor_data vendor_st = {
  94        .ifls                   = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
  95        .fifosize               = 64,
  96        .lcrh_tx                = ST_UART011_LCRH_TX,
  97        .lcrh_rx                = ST_UART011_LCRH_RX,
  98        .oversampling           = true,
  99        .interrupt_may_hang     = true,
 100        .dma_threshold          = true,
 101        .cts_event_workaround   = true,
 102};
 103
 104static struct uart_amba_port *amba_ports[UART_NR];
 105
 106/* Deals with DMA transactions */
 107
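     /*
      * One DMA receive buffer: the backing storage plus the single-entry
      * scatterlist used to describe it to the DMA engine.
      */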
 108struct pl011_sgbuf {
 109        struct scatterlist sg;
 110        char *buf;
 111};
 112
 113struct pl011_dmarx_data {
 114        struct dma_chan         *chan;
 115        struct completion       complete;
 116        bool                    use_buf_b;
 117        struct pl011_sgbuf      sgbuf_a;
 118        struct pl011_sgbuf      sgbuf_b;
 119        dma_cookie_t            cookie;
 120        bool                    running;
 121};
 122
 123struct pl011_dmatx_data {
 124        struct dma_chan         *chan;
 125        struct scatterlist      sg;
 126        char                    *buf;
 127        bool                    queued;
 128};
 129
 130/*
 131 * We wrap our port structure around the generic uart_port.
 132 */
 133struct uart_amba_port {
 134        struct uart_port        port;
 135        struct clk              *clk;
 136        /* Two optional pin states - default & sleep */
 137        struct pinctrl          *pinctrl;
 138        struct pinctrl_state    *pins_default;
 139        struct pinctrl_state    *pins_sleep;
 140        const struct vendor_data *vendor;
 141        unsigned int            dmacr;          /* dma control reg */
 142        unsigned int            im;             /* interrupt mask */
 143        unsigned int            old_status;
 144        unsigned int            fifosize;       /* vendor-specific */
 145        unsigned int            lcrh_tx;        /* vendor-specific */
 146        unsigned int            lcrh_rx;        /* vendor-specific */
 147        unsigned int            old_cr;         /* state during shutdown */
 148        bool                    autorts;
 149        char                    type[12];
 150        bool                    interrupt_may_hang; /* vendor-specific */
 151#ifdef CONFIG_DMA_ENGINE
 152        /* DMA stuff */
 153        bool                    using_tx_dma;
 154        bool                    using_rx_dma;
 155        struct pl011_dmarx_data dmarx;
 156        struct pl011_dmatx_data dmatx;
 157#endif
 158};
 159
 160/*
 161 * Reads up to 256 characters from the FIFO or until it's empty and
 162 * inserts them into the TTY layer. Returns the number of characters
 163 * read from the FIFO.
 164 */
 165static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 166{
 167        u16 status, ch;
 168        unsigned int flag, max_count = 256;
 169        int fifotaken = 0;
 170
 171        while (max_count--) {
 172                status = readw(uap->port.membase + UART01x_FR);
 173                if (status & UART01x_FR_RXFE)
 174                        break;
 175
 176                /* Take chars from the FIFO and update status */
 177                ch = readw(uap->port.membase + UART01x_DR) |
 178                        UART_DUMMY_DR_RX;
 179                flag = TTY_NORMAL;
 180                uap->port.icount.rx++;
 181                fifotaken++;
 182
 183                if (unlikely(ch & UART_DR_ERROR)) {
 184                        if (ch & UART011_DR_BE) {
 185                                ch &= ~(UART011_DR_FE | UART011_DR_PE);
 186                                uap->port.icount.brk++;
 187                                if (uart_handle_break(&uap->port))
 188                                        continue;
 189                        } else if (ch & UART011_DR_PE)
 190                                uap->port.icount.parity++;
 191                        else if (ch & UART011_DR_FE)
 192                                uap->port.icount.frame++;
 193                        if (ch & UART011_DR_OE)
 194                                uap->port.icount.overrun++;
 195
 196                        ch &= uap->port.read_status_mask;
 197
 198                        if (ch & UART011_DR_BE)
 199                                flag = TTY_BREAK;
 200                        else if (ch & UART011_DR_PE)
 201                                flag = TTY_PARITY;
 202                        else if (ch & UART011_DR_FE)
 203                                flag = TTY_FRAME;
 204                }
 205
 206                if (uart_handle_sysrq_char(&uap->port, ch & 255))
 207                        continue;
 208
 209                uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
 210        }
 211
 212        return fifotaken;
 213}
 214
 215
 216/*
 217 * All the DMA operation mode stuff goes inside this ifdef.
  218 * This assumes that you have a generic DMA device interface;
  219 * no custom DMA interfaces are supported.
 220 */
 221#ifdef CONFIG_DMA_ENGINE
 222
 223#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 224
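     /*
      * Allocate a PL011_DMA_BUFFER_SIZE buffer, wrap it in a single-entry
      * scatterlist and map it for DMA in the given direction.
      */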
 225static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
 226        enum dma_data_direction dir)
 227{
 228        sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
 229        if (!sg->buf)
 230                return -ENOMEM;
 231
 232        sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
 233
 234        if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
 235                kfree(sg->buf);
 236                return -EINVAL;
 237        }
 238        return 0;
 239}
 240
 241static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
 242        enum dma_data_direction dir)
 243{
 244        if (sg->buf) {
 245                dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
 246                kfree(sg->buf);
 247        }
 248}
 249
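     /*
      * Request the TX (and optionally RX) DMA channels described by the
      * platform data and apply the slave configuration for the data
      * register.  Without platform data the port simply stays in PIO mode.
      */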
 250static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 251{
 252        /* DMA is the sole user of the platform data right now */
 253        struct amba_pl011_data *plat = uap->port.dev->platform_data;
 254        struct dma_slave_config tx_conf = {
 255                .dst_addr = uap->port.mapbase + UART01x_DR,
 256                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 257                .direction = DMA_MEM_TO_DEV,
 258                .dst_maxburst = uap->fifosize >> 1,
 259                .device_fc = false,
 260        };
 261        struct dma_chan *chan;
 262        dma_cap_mask_t mask;
 263
 264        /* We need platform data */
 265        if (!plat || !plat->dma_filter) {
 266                dev_info(uap->port.dev, "no DMA platform data\n");
 267                return;
 268        }
 269
 270        /* Try to acquire a generic DMA engine slave TX channel */
 271        dma_cap_zero(mask);
 272        dma_cap_set(DMA_SLAVE, mask);
 273
 274        chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
 275        if (!chan) {
 276                dev_err(uap->port.dev, "no TX DMA channel!\n");
 277                return;
 278        }
 279
 280        dmaengine_slave_config(chan, &tx_conf);
 281        uap->dmatx.chan = chan;
 282
 283        dev_info(uap->port.dev, "DMA channel TX %s\n",
 284                 dma_chan_name(uap->dmatx.chan));
 285
 286        /* Optionally make use of an RX channel as well */
 287        if (plat->dma_rx_param) {
 288                struct dma_slave_config rx_conf = {
 289                        .src_addr = uap->port.mapbase + UART01x_DR,
 290                        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 291                        .direction = DMA_DEV_TO_MEM,
 292                        .src_maxburst = uap->fifosize >> 1,
 293                        .device_fc = false,
 294                };
 295
 296                chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
 297                if (!chan) {
 298                        dev_err(uap->port.dev, "no RX DMA channel!\n");
 299                        return;
 300                }
 301
 302                dmaengine_slave_config(chan, &rx_conf);
 303                uap->dmarx.chan = chan;
 304
 305                dev_info(uap->port.dev, "DMA channel RX %s\n",
 306                         dma_chan_name(uap->dmarx.chan));
 307        }
 308}
 309
 310#ifndef MODULE
 311/*
  312 * Stack up the UARTs and let the above probe routine be run at device
  313 * initcall time, because the serial driver is called as an arch
  314 * initcall, at which time the DMA subsystem is not yet registered.
  315 * At that point the driver will switch over to using DMA where desired.
 316 */
 317struct dma_uap {
 318        struct list_head node;
 319        struct uart_amba_port *uap;
 320};
 321
 322static LIST_HEAD(pl011_dma_uarts);
 323
 324static int __init pl011_dma_initcall(void)
 325{
 326        struct list_head *node, *tmp;
 327
 328        list_for_each_safe(node, tmp, &pl011_dma_uarts) {
 329                struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
 330                pl011_dma_probe_initcall(dmau->uap);
 331                list_del(node);
 332                kfree(dmau);
 333        }
 334        return 0;
 335}
 336
 337device_initcall(pl011_dma_initcall);
 338
 339static void pl011_dma_probe(struct uart_amba_port *uap)
 340{
 341        struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
 342        if (dmau) {
 343                dmau->uap = uap;
 344                list_add_tail(&dmau->node, &pl011_dma_uarts);
 345        }
 346}
 347#else
 348static void pl011_dma_probe(struct uart_amba_port *uap)
 349{
 350        pl011_dma_probe_initcall(uap);
 351}
 352#endif
 353
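     /* Release any DMA channels that were acquired at probe time */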
 354static void pl011_dma_remove(struct uart_amba_port *uap)
 355{
 356        /* TODO: remove the initcall if it has not yet executed */
 357        if (uap->dmatx.chan)
 358                dma_release_channel(uap->dmatx.chan);
 359        if (uap->dmarx.chan)
 360                dma_release_channel(uap->dmarx.chan);
 361}
 362
 363/* Forward declare this for the refill routine */
 364static int pl011_dma_tx_refill(struct uart_amba_port *uap);
 365
 366/*
 367 * The current DMA TX buffer has been sent.
 368 * Try to queue up another DMA buffer.
 369 */
 370static void pl011_dma_tx_callback(void *data)
 371{
 372        struct uart_amba_port *uap = data;
 373        struct pl011_dmatx_data *dmatx = &uap->dmatx;
 374        unsigned long flags;
 375        u16 dmacr;
 376
 377        spin_lock_irqsave(&uap->port.lock, flags);
 378        if (uap->dmatx.queued)
 379                dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
 380                             DMA_TO_DEVICE);
 381
 382        dmacr = uap->dmacr;
 383        uap->dmacr = dmacr & ~UART011_TXDMAE;
 384        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 385
 386        /*
 387         * If TX DMA was disabled, it means that we've stopped the DMA for
 388         * some reason (eg, XOFF received, or we want to send an X-char.)
 389         *
 390         * Note: we need to be careful here of a potential race between DMA
 391         * and the rest of the driver - if the driver disables TX DMA while
  392         * a TX buffer is completing, we must update the tx queued status to
 393         * get further refills (hence we check dmacr).
 394         */
 395        if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
 396            uart_circ_empty(&uap->port.state->xmit)) {
 397                uap->dmatx.queued = false;
 398                spin_unlock_irqrestore(&uap->port.lock, flags);
 399                return;
 400        }
 401
 402        if (pl011_dma_tx_refill(uap) <= 0) {
 403                /*
 404                 * We didn't queue a DMA buffer for some reason, but we
 405                 * have data pending to be sent.  Re-enable the TX IRQ.
 406                 */
 407                uap->im |= UART011_TXIM;
 408                writew(uap->im, uap->port.membase + UART011_IMSC);
 409        }
 410        spin_unlock_irqrestore(&uap->port.lock, flags);
 411}
 412
 413/*
 414 * Try to refill the TX DMA buffer.
 415 * Locking: called with port lock held and IRQs disabled.
 416 * Returns:
 417 *   1 if we queued up a TX DMA buffer.
 418 *   0 if we didn't want to handle this by DMA
 419 *  <0 on error
 420 */
 421static int pl011_dma_tx_refill(struct uart_amba_port *uap)
 422{
 423        struct pl011_dmatx_data *dmatx = &uap->dmatx;
 424        struct dma_chan *chan = dmatx->chan;
 425        struct dma_device *dma_dev = chan->device;
 426        struct dma_async_tx_descriptor *desc;
 427        struct circ_buf *xmit = &uap->port.state->xmit;
 428        unsigned int count;
 429
 430        /*
 431         * Try to avoid the overhead involved in using DMA if the
 432         * transaction fits in the first half of the FIFO, by using
 433         * the standard interrupt handling.  This ensures that we
 434         * issue a uart_write_wakeup() at the appropriate time.
 435         */
 436        count = uart_circ_chars_pending(xmit);
 437        if (count < (uap->fifosize >> 1)) {
 438                uap->dmatx.queued = false;
 439                return 0;
 440        }
 441
 442        /*
 443         * Bodge: don't send the last character by DMA, as this
 444         * will prevent XON from notifying us to restart DMA.
 445         */
 446        count -= 1;
 447
 448        /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
 449        if (count > PL011_DMA_BUFFER_SIZE)
 450                count = PL011_DMA_BUFFER_SIZE;
 451
 452        if (xmit->tail < xmit->head)
 453                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
 454        else {
  455                size_t first = min_t(size_t, UART_XMIT_SIZE - xmit->tail, count);
  456                size_t second = count - first;
 457
 458                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
 459                if (second)
 460                        memcpy(&dmatx->buf[first], &xmit->buf[0], second);
 461        }
 462
 463        dmatx->sg.length = count;
 464
 465        if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
 466                uap->dmatx.queued = false;
 467                dev_dbg(uap->port.dev, "unable to map TX DMA\n");
 468                return -EBUSY;
 469        }
 470
 471        desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
 472                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 473        if (!desc) {
 474                dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
 475                uap->dmatx.queued = false;
 476                /*
 477                 * If DMA cannot be used right now, we complete this
 478                 * transaction via IRQ and let the TTY layer retry.
 479                 */
 480                dev_dbg(uap->port.dev, "TX DMA busy\n");
 481                return -EBUSY;
 482        }
 483
 484        /* Some data to go along to the callback */
 485        desc->callback = pl011_dma_tx_callback;
 486        desc->callback_param = uap;
 487
 488        /* All errors should happen at prepare time */
 489        dmaengine_submit(desc);
 490
 491        /* Fire the DMA transaction */
 492        dma_dev->device_issue_pending(chan);
 493
 494        uap->dmacr |= UART011_TXDMAE;
 495        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 496        uap->dmatx.queued = true;
 497
 498        /*
 499         * Now we know that DMA will fire, so advance the ring buffer
 500         * with the stuff we just dispatched.
 501         */
 502        xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
 503        uap->port.icount.tx += count;
 504
 505        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 506                uart_write_wakeup(&uap->port);
 507
 508        return 1;
 509}
 510
 511/*
 512 * We received a transmit interrupt without a pending X-char but with
 513 * pending characters.
 514 * Locking: called with port lock held and IRQs disabled.
 515 * Returns:
 516 *   false if we want to use PIO to transmit
 517 *   true if we queued a DMA buffer
 518 */
 519static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
 520{
 521        if (!uap->using_tx_dma)
 522                return false;
 523
 524        /*
 525         * If we already have a TX buffer queued, but received a
 526         * TX interrupt, it will be because we've just sent an X-char.
 527         * Ensure the TX DMA is enabled and the TX IRQ is disabled.
 528         */
 529        if (uap->dmatx.queued) {
 530                uap->dmacr |= UART011_TXDMAE;
 531                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 532                uap->im &= ~UART011_TXIM;
 533                writew(uap->im, uap->port.membase + UART011_IMSC);
 534                return true;
 535        }
 536
 537        /*
 538         * We don't have a TX buffer queued, so try to queue one.
 539         * If we successfully queued a buffer, mask the TX IRQ.
 540         */
 541        if (pl011_dma_tx_refill(uap) > 0) {
 542                uap->im &= ~UART011_TXIM;
 543                writew(uap->im, uap->port.membase + UART011_IMSC);
 544                return true;
 545        }
 546        return false;
 547}
 548
 549/*
 550 * Stop the DMA transmit (eg, due to received XOFF).
 551 * Locking: called with port lock held and IRQs disabled.
 552 */
 553static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
 554{
 555        if (uap->dmatx.queued) {
 556                uap->dmacr &= ~UART011_TXDMAE;
 557                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 558        }
 559}
 560
 561/*
 562 * Try to start a DMA transmit, or in the case of an XON/OFF
 563 * character queued for send, try to get that character out ASAP.
 564 * Locking: called with port lock held and IRQs disabled.
 565 * Returns:
 566 *   false if we want the TX IRQ to be enabled
 567 *   true if we have a buffer queued
 568 */
 569static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
 570{
 571        u16 dmacr;
 572
 573        if (!uap->using_tx_dma)
 574                return false;
 575
 576        if (!uap->port.x_char) {
 577                /* no X-char, try to push chars out in DMA mode */
 578                bool ret = true;
 579
 580                if (!uap->dmatx.queued) {
 581                        if (pl011_dma_tx_refill(uap) > 0) {
 582                                uap->im &= ~UART011_TXIM;
 583                                ret = true;
 584                        } else {
 585                                uap->im |= UART011_TXIM;
 586                                ret = false;
 587                        }
 588                        writew(uap->im, uap->port.membase + UART011_IMSC);
 589                } else if (!(uap->dmacr & UART011_TXDMAE)) {
 590                        uap->dmacr |= UART011_TXDMAE;
 591                        writew(uap->dmacr,
 592                                       uap->port.membase + UART011_DMACR);
 593                }
 594                return ret;
 595        }
 596
 597        /*
  598         * We have an X-char to send.  Disable DMA to prevent it from
  599         * loading the TX FIFO, and then see if we can stuff it into the FIFO.
 600         */
 601        dmacr = uap->dmacr;
 602        uap->dmacr &= ~UART011_TXDMAE;
 603        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 604
 605        if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
 606                /*
 607                 * No space in the FIFO, so enable the transmit interrupt
 608                 * so we know when there is space.  Note that once we've
 609                 * loaded the character, we should just re-enable DMA.
 610                 */
 611                return false;
 612        }
 613
 614        writew(uap->port.x_char, uap->port.membase + UART01x_DR);
 615        uap->port.icount.tx++;
 616        uap->port.x_char = 0;
 617
 618        /* Success - restore the DMA state */
 619        uap->dmacr = dmacr;
 620        writew(dmacr, uap->port.membase + UART011_DMACR);
 621
 622        return true;
 623}
 624
 625/*
 626 * Flush the transmit buffer.
 627 * Locking: called with port lock held and IRQs disabled.
 628 */
 629static void pl011_dma_flush_buffer(struct uart_port *port)
 630{
 631        struct uart_amba_port *uap = (struct uart_amba_port *)port;
 632
 633        if (!uap->using_tx_dma)
 634                return;
 635
 636        /* Avoid deadlock with the DMA engine callback */
 637        spin_unlock(&uap->port.lock);
 638        dmaengine_terminate_all(uap->dmatx.chan);
 639        spin_lock(&uap->port.lock);
 640        if (uap->dmatx.queued) {
 641                dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
 642                             DMA_TO_DEVICE);
 643                uap->dmatx.queued = false;
 644                uap->dmacr &= ~UART011_TXDMAE;
 645                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 646        }
 647}
 648
 649static void pl011_dma_rx_callback(void *data);
 650
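     /*
      * Queue an RX DMA transfer into the current buffer (A or B), enable
      * receive DMA in DMACR and mask the RX interrupt.  Returns 0 on
      * success or a negative error so the caller can fall back to
      * interrupt mode.
      */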
 651static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
 652{
 653        struct dma_chan *rxchan = uap->dmarx.chan;
 654        struct pl011_dmarx_data *dmarx = &uap->dmarx;
 655        struct dma_async_tx_descriptor *desc;
 656        struct pl011_sgbuf *sgbuf;
 657
 658        if (!rxchan)
 659                return -EIO;
 660
 661        /* Start the RX DMA job */
 662        sgbuf = uap->dmarx.use_buf_b ?
 663                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
 664        desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
 665                                        DMA_DEV_TO_MEM,
 666                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 667        /*
  668         * If the DMA engine is busy and cannot prepare a
  669         * descriptor, that's no big deal: the driver will fall back
  670         * to interrupt mode as a result of this error code.
 671         */
 672        if (!desc) {
 673                uap->dmarx.running = false;
 674                dmaengine_terminate_all(rxchan);
 675                return -EBUSY;
 676        }
 677
 678        /* Some data to go along to the callback */
 679        desc->callback = pl011_dma_rx_callback;
 680        desc->callback_param = uap;
 681        dmarx->cookie = dmaengine_submit(desc);
 682        dma_async_issue_pending(rxchan);
 683
 684        uap->dmacr |= UART011_RXDMAE;
 685        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 686        uap->dmarx.running = true;
 687
 688        uap->im &= ~UART011_RXIM;
 689        writew(uap->im, uap->port.membase + UART011_IMSC);
 690
 691        return 0;
 692}
 693
 694/*
 695 * This is called when either the DMA job is complete, or
 696 * the FIFO timeout interrupt occurred. This must be called
 697 * with the port spinlock uap->port.lock held.
 698 */
 699static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 700                               u32 pending, bool use_buf_b,
 701                               bool readfifo)
 702{
 703        struct tty_struct *tty = uap->port.state->port.tty;
 704        struct pl011_sgbuf *sgbuf = use_buf_b ?
 705                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
 706        struct device *dev = uap->dmarx.chan->device->dev;
 707        int dma_count = 0;
 708        u32 fifotaken = 0; /* only used for vdbg() */
 709
 710        /* Pick everything from the DMA first */
 711        if (pending) {
 712                /* Sync in buffer */
 713                dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
 714
 715                /*
 716                 * First take all chars in the DMA pipe, then look in the FIFO.
  717                 * Note that tty_insert_flip_string() tries to take as many chars
 718                 * as it can.
 719                 */
 720                dma_count = tty_insert_flip_string(uap->port.state->port.tty,
 721                                                   sgbuf->buf, pending);
 722
 723                /* Return buffer to device */
 724                dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
 725
 726                uap->port.icount.rx += dma_count;
 727                if (dma_count < pending)
 728                        dev_warn(uap->port.dev,
 729                                 "couldn't insert all characters (TTY is full?)\n");
 730        }
 731
 732        /*
 733         * Only continue with trying to read the FIFO if all DMA chars have
 734         * been taken first.
 735         */
 736        if (dma_count == pending && readfifo) {
 737                /* Clear any error flags */
 738                writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
 739                       uap->port.membase + UART011_ICR);
 740
 741                /*
 742                 * If we read all the DMA'd characters, and we had an
 743                 * incomplete buffer, that could be due to an rx error, or
 744                 * maybe we just timed out. Read any pending chars and check
 745                 * the error status.
 746                 *
 747                 * Error conditions will only occur in the FIFO, these will
 748                 * trigger an immediate interrupt and stop the DMA job, so we
 749                 * will always find the error in the FIFO, never in the DMA
 750                 * buffer.
 751                 */
 752                fifotaken = pl011_fifo_to_tty(uap);
 753        }
 754
 755        spin_unlock(&uap->port.lock);
 756        dev_vdbg(uap->port.dev,
 757                 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
 758                 dma_count, fifotaken);
 759        tty_flip_buffer_push(tty);
 760        spin_lock(&uap->port.lock);
 761}
 762
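     /*
      * Called from the UART interrupt handler on an RX timeout while a
      * DMA job is running: pause the transfer to obtain a trustworthy
      * residue, drain the DMA buffer and the FIFO, then switch buffers
      * and restart DMA.
      */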
 763static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 764{
 765        struct pl011_dmarx_data *dmarx = &uap->dmarx;
 766        struct dma_chan *rxchan = dmarx->chan;
 767        struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
 768                &dmarx->sgbuf_b : &dmarx->sgbuf_a;
 769        size_t pending;
 770        struct dma_tx_state state;
 771        enum dma_status dmastat;
 772
 773        /*
  774         * Pause the transfer so we can trust the current counter;
 775         * do this before we pause the PL011 block, else we may
 776         * overflow the FIFO.
 777         */
 778        if (dmaengine_pause(rxchan))
 779                dev_err(uap->port.dev, "unable to pause DMA transfer\n");
 780        dmastat = rxchan->device->device_tx_status(rxchan,
 781                                                   dmarx->cookie, &state);
 782        if (dmastat != DMA_PAUSED)
 783                dev_err(uap->port.dev, "unable to pause DMA transfer\n");
 784
 785        /* Disable RX DMA - incoming data will wait in the FIFO */
 786        uap->dmacr &= ~UART011_RXDMAE;
 787        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 788        uap->dmarx.running = false;
 789
 790        pending = sgbuf->sg.length - state.residue;
 791        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 792        /* Then we terminate the transfer - we now know our residue */
 793        dmaengine_terminate_all(rxchan);
 794
 795        /*
 796         * This will take the chars we have so far and insert
 797         * into the framework.
 798         */
 799        pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
 800
 801        /* Switch buffer & re-trigger DMA job */
 802        dmarx->use_buf_b = !dmarx->use_buf_b;
 803        if (pl011_dma_rx_trigger_dma(uap)) {
  804                dev_dbg(uap->port.dev, "could not retrigger RX DMA job, "
  805                        "fall back to interrupt mode\n");
 806                uap->im |= UART011_RXIM;
 807                writew(uap->im, uap->port.membase + UART011_IMSC);
 808        }
 809}
 810
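     /*
      * DMA completion callback: the current RX buffer has filled up
      * before a FIFO timeout occurred.  Empty it into the TTY layer and
      * kick off another transfer on the other buffer.
      */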
 811static void pl011_dma_rx_callback(void *data)
 812{
 813        struct uart_amba_port *uap = data;
 814        struct pl011_dmarx_data *dmarx = &uap->dmarx;
 815        struct dma_chan *rxchan = dmarx->chan;
 816        bool lastbuf = dmarx->use_buf_b;
 817        struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
 818                &dmarx->sgbuf_b : &dmarx->sgbuf_a;
 819        size_t pending;
 820        struct dma_tx_state state;
 821        int ret;
 822
 823        /*
 824         * This completion interrupt occurs typically when the
 825         * RX buffer is totally stuffed but no timeout has yet
 826         * occurred. When that happens, we just want the RX
 827         * routine to flush out the secondary DMA buffer while
 828         * we immediately trigger the next DMA job.
 829         */
 830        spin_lock_irq(&uap->port.lock);
 831        /*
 832         * Rx data can be taken by the UART interrupts during
 833         * the DMA irq handler. So we check the residue here.
 834         */
 835        rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
 836        pending = sgbuf->sg.length - state.residue;
 837        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 838        /* Then we terminate the transfer - we now know our residue */
 839        dmaengine_terminate_all(rxchan);
 840
 841        uap->dmarx.running = false;
 842        dmarx->use_buf_b = !lastbuf;
 843        ret = pl011_dma_rx_trigger_dma(uap);
 844
 845        pl011_dma_rx_chars(uap, pending, lastbuf, false);
 846        spin_unlock_irq(&uap->port.lock);
 847        /*
 848         * Do this check after we picked the DMA chars so we don't
 849         * get some IRQ immediately from RX.
 850         */
 851        if (ret) {
  852                dev_dbg(uap->port.dev, "could not retrigger RX DMA job, "
  853                        "fall back to interrupt mode\n");
 854                uap->im |= UART011_RXIM;
 855                writew(uap->im, uap->port.membase + UART011_IMSC);
 856        }
 857}
 858
 859/*
 860 * Stop accepting received characters, when we're shutting down or
 861 * suspending this port.
 862 * Locking: called with port lock held and IRQs disabled.
 863 */
 864static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
 865{
 866        /* FIXME.  Just disable the DMA enable */
 867        uap->dmacr &= ~UART011_RXDMAE;
 868        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 869}
 870
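     /*
      * Set up DMA for a port that is being opened: allocate the TX
      * bounce buffer, allocate and map both RX buffers, enable
      * DMA-on-error and try to start the first RX transfer.  On any
      * failure the port simply stays in interrupt mode.
      */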
 871static void pl011_dma_startup(struct uart_amba_port *uap)
 872{
 873        int ret;
 874
 875        if (!uap->dmatx.chan)
 876                return;
 877
 878        uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
 879        if (!uap->dmatx.buf) {
 880                dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
 881                uap->port.fifosize = uap->fifosize;
 882                return;
 883        }
 884
 885        sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
 886
 887        /* The DMA buffer is now the FIFO the TTY subsystem can use */
 888        uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
 889        uap->using_tx_dma = true;
 890
 891        if (!uap->dmarx.chan)
 892                goto skip_rx;
 893
 894        /* Allocate and map DMA RX buffers */
 895        ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
 896                               DMA_FROM_DEVICE);
 897        if (ret) {
 898                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
 899                        "RX buffer A", ret);
 900                goto skip_rx;
 901        }
 902
 903        ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
 904                               DMA_FROM_DEVICE);
 905        if (ret) {
 906                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
 907                        "RX buffer B", ret);
 908                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
 909                                 DMA_FROM_DEVICE);
 910                goto skip_rx;
 911        }
 912
 913        uap->using_rx_dma = true;
 914
 915skip_rx:
 916        /* Turn on DMA error (RX/TX will be enabled on demand) */
 917        uap->dmacr |= UART011_DMAONERR;
 918        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 919
 920        /*
  921         * ST Micro variants have a specific DMA burst threshold
  922         * compensation. Set this to 16 bytes, so bursts will only
  923         * be issued above/below 16 bytes.
 924         */
 925        if (uap->vendor->dma_threshold)
 926                writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
 927                               uap->port.membase + ST_UART011_DMAWM);
 928
 929        if (uap->using_rx_dma) {
 930                if (pl011_dma_rx_trigger_dma(uap))
 931                        dev_dbg(uap->port.dev, "could not trigger initial "
 932                                "RX DMA job, fall back to interrupt mode\n");
 933        }
 934}
 935
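     /*
      * Tear down DMA when the port is closed: wait for the UART to go
      * idle, disable DMA in DMACR, terminate any outstanding jobs, then
      * unmap and free the buffers.
      */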
 936static void pl011_dma_shutdown(struct uart_amba_port *uap)
 937{
 938        if (!(uap->using_tx_dma || uap->using_rx_dma))
 939                return;
 940
 941        /* Disable RX and TX DMA */
 942        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
 943                barrier();
 944
 945        spin_lock_irq(&uap->port.lock);
 946        uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
 947        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 948        spin_unlock_irq(&uap->port.lock);
 949
 950        if (uap->using_tx_dma) {
 951                /* In theory, this should already be done by pl011_dma_flush_buffer */
 952                dmaengine_terminate_all(uap->dmatx.chan);
 953                if (uap->dmatx.queued) {
 954                        dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
 955                                     DMA_TO_DEVICE);
 956                        uap->dmatx.queued = false;
 957                }
 958
 959                kfree(uap->dmatx.buf);
 960                uap->using_tx_dma = false;
 961        }
 962
 963        if (uap->using_rx_dma) {
 964                dmaengine_terminate_all(uap->dmarx.chan);
 965                /* Clean up the RX DMA */
 966                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
 967                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
 968                uap->using_rx_dma = false;
 969        }
 970}
 971
 972static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
 973{
 974        return uap->using_rx_dma;
 975}
 976
 977static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
 978{
 979        return uap->using_rx_dma && uap->dmarx.running;
 980}
 981
 982
 983#else
 984/* Blank functions if the DMA engine is not available */
 985static inline void pl011_dma_probe(struct uart_amba_port *uap)
 986{
 987}
 988
 989static inline void pl011_dma_remove(struct uart_amba_port *uap)
 990{
 991}
 992
 993static inline void pl011_dma_startup(struct uart_amba_port *uap)
 994{
 995}
 996
 997static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
 998{
 999}
1000
1001static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1002{
1003        return false;
1004}
1005
1006static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1007{
1008}
1009
1010static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1011{
1012        return false;
1013}
1014
1015static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1016{
1017}
1018
1019static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1020{
1021}
1022
1023static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1024{
1025        return -EIO;
1026}
1027
1028static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1029{
1030        return false;
1031}
1032
1033static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1034{
1035        return false;
1036}
1037
1038#define pl011_dma_flush_buffer  NULL
1039#endif
1040
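     /*
      * The four uart_ops callbacks below are called by the serial core
      * with the port lock held and interrupts disabled.
      */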
1041static void pl011_stop_tx(struct uart_port *port)
1042{
1043        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1044
1045        uap->im &= ~UART011_TXIM;
1046        writew(uap->im, uap->port.membase + UART011_IMSC);
1047        pl011_dma_tx_stop(uap);
1048}
1049
1050static void pl011_start_tx(struct uart_port *port)
1051{
1052        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1053
1054        if (!pl011_dma_tx_start(uap)) {
1055                uap->im |= UART011_TXIM;
1056                writew(uap->im, uap->port.membase + UART011_IMSC);
1057        }
1058}
1059
1060static void pl011_stop_rx(struct uart_port *port)
1061{
1062        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1063
1064        uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1065                     UART011_PEIM|UART011_BEIM|UART011_OEIM);
1066        writew(uap->im, uap->port.membase + UART011_IMSC);
1067
1068        pl011_dma_rx_stop(uap);
1069}
1070
1071static void pl011_enable_ms(struct uart_port *port)
1072{
1073        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1074
1075        uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1076        writew(uap->im, uap->port.membase + UART011_IMSC);
1077}
1078
1079static void pl011_rx_chars(struct uart_amba_port *uap)
1080{
1081        struct tty_struct *tty = uap->port.state->port.tty;
1082
1083        pl011_fifo_to_tty(uap);
1084
1085        spin_unlock(&uap->port.lock);
1086        tty_flip_buffer_push(tty);
1087        /*
 1088         * If we have been out of DMA mode for a while,
 1089         * attempt to switch back to DMA mode.
1090         */
1091        if (pl011_dma_rx_available(uap)) {
1092                if (pl011_dma_rx_trigger_dma(uap)) {
 1093                        dev_dbg(uap->port.dev, "could not trigger RX DMA job, "
 1094                                "fall back to interrupt mode again\n");
1095                        uap->im |= UART011_RXIM;
1096                } else
1097                        uap->im &= ~UART011_RXIM;
1098                writew(uap->im, uap->port.membase + UART011_IMSC);
1099        }
1100        spin_lock(&uap->port.lock);
1101}
1102
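     /*
      * Transmit from the port's circular buffer by PIO: send any pending
      * X-char first, hand over to DMA if possible, otherwise stuff up to
      * half a FIFO's worth of characters into the data register.
      */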
1103static void pl011_tx_chars(struct uart_amba_port *uap)
1104{
1105        struct circ_buf *xmit = &uap->port.state->xmit;
1106        int count;
1107
1108        if (uap->port.x_char) {
1109                writew(uap->port.x_char, uap->port.membase + UART01x_DR);
1110                uap->port.icount.tx++;
1111                uap->port.x_char = 0;
1112                return;
1113        }
1114        if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
1115                pl011_stop_tx(&uap->port);
1116                return;
1117        }
1118
1119        /* If we are using DMA mode, try to send some characters. */
1120        if (pl011_dma_tx_irq(uap))
1121                return;
1122
1123        count = uap->fifosize >> 1;
1124        do {
1125                writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
1126                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1127                uap->port.icount.tx++;
1128                if (uart_circ_empty(xmit))
1129                        break;
1130        } while (--count > 0);
1131
1132        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1133                uart_write_wakeup(&uap->port);
1134
1135        if (uart_circ_empty(xmit))
1136                pl011_stop_tx(&uap->port);
1137}
1138
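     /*
      * Sample the modem status lines from the flag register and report
      * any changes in DCD, DSR or CTS to the serial core.
      */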
1139static void pl011_modem_status(struct uart_amba_port *uap)
1140{
1141        unsigned int status, delta;
1142
1143        status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1144
1145        delta = status ^ uap->old_status;
1146        uap->old_status = status;
1147
1148        if (!delta)
1149                return;
1150
1151        if (delta & UART01x_FR_DCD)
1152                uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1153
1154        if (delta & UART01x_FR_DSR)
1155                uap->port.icount.dsr++;
1156
1157        if (delta & UART01x_FR_CTS)
1158                uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
1159
1160        wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1161}
1162
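     /*
      * The interrupt handler: loop on the masked interrupt status,
      * dispatching RX, modem-status and TX work until either the status
      * clears or AMBA_ISR_PASS_LIMIT iterations have been made.
      */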
1163static irqreturn_t pl011_int(int irq, void *dev_id)
1164{
1165        struct uart_amba_port *uap = dev_id;
1166        unsigned long flags;
1167        unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1168        int handled = 0;
1169        unsigned int dummy_read;
1170
1171        spin_lock_irqsave(&uap->port.lock, flags);
1172
1173        status = readw(uap->port.membase + UART011_MIS);
1174        if (status) {
1175                do {
1176                        if (uap->vendor->cts_event_workaround) {
 1177                                /* workaround to make sure that all bits are unlocked */
1178                                writew(0x00, uap->port.membase + UART011_ICR);
1179
1180                                /*
 1181                                 * Workaround: introduce a 26ns (1 UART clock) delay
 1182                                 * before W1C; a single APB access incurs a 2 pclk
 1183                                 * (133.12MHz) delay, so add 2 dummy reads.
1184                                 */
1185                                dummy_read = readw(uap->port.membase + UART011_ICR);
1186                                dummy_read = readw(uap->port.membase + UART011_ICR);
1187                        }
1188
1189                        writew(status & ~(UART011_TXIS|UART011_RTIS|
1190                                          UART011_RXIS),
1191                               uap->port.membase + UART011_ICR);
1192
1193                        if (status & (UART011_RTIS|UART011_RXIS)) {
1194                                if (pl011_dma_rx_running(uap))
1195                                        pl011_dma_rx_irq(uap);
1196                                else
1197                                        pl011_rx_chars(uap);
1198                        }
1199                        if (status & (UART011_DSRMIS|UART011_DCDMIS|
1200                                      UART011_CTSMIS|UART011_RIMIS))
1201                                pl011_modem_status(uap);
1202                        if (status & UART011_TXIS)
1203                                pl011_tx_chars(uap);
1204
1205                        if (pass_counter-- == 0)
1206                                break;
1207
1208                        status = readw(uap->port.membase + UART011_MIS);
1209                } while (status != 0);
1210                handled = 1;
1211        }
1212
1213        spin_unlock_irqrestore(&uap->port.lock, flags);
1214
1215        return IRQ_RETVAL(handled);
1216}
1217
1218static unsigned int pl01x_tx_empty(struct uart_port *port)
1219{
1220        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1221        unsigned int status = readw(uap->port.membase + UART01x_FR);
1222        return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1223}
1224
1225static unsigned int pl01x_get_mctrl(struct uart_port *port)
1226{
1227        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1228        unsigned int result = 0;
1229        unsigned int status = readw(uap->port.membase + UART01x_FR);
1230
1231#define TIOCMBIT(uartbit, tiocmbit)     \
1232        if (status & uartbit)           \
1233                result |= tiocmbit
1234
1235        TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1236        TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1237        TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1238        TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1239#undef TIOCMBIT
1240        return result;
1241}
1242
1243static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1244{
1245        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1246        unsigned int cr;
1247
1248        cr = readw(uap->port.membase + UART011_CR);
1249
1250#define TIOCMBIT(tiocmbit, uartbit)             \
1251        if (mctrl & tiocmbit)           \
1252                cr |= uartbit;          \
1253        else                            \
1254                cr &= ~uartbit
1255
1256        TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1257        TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1258        TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1259        TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1260        TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1261
1262        if (uap->autorts) {
1263                /* We need to disable auto-RTS if we want to turn RTS off */
1264                TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1265        }
1266#undef TIOCMBIT
1267
1268        writew(cr, uap->port.membase + UART011_CR);
1269}
1270
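     /* Assert or release a break condition via the BRK bit in LCRH */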
1271static void pl011_break_ctl(struct uart_port *port, int break_state)
1272{
1273        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1274        unsigned long flags;
1275        unsigned int lcr_h;
1276
1277        spin_lock_irqsave(&uap->port.lock, flags);
1278        lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1279        if (break_state == -1)
1280                lcr_h |= UART01x_LCRH_BRK;
1281        else
1282                lcr_h &= ~UART01x_LCRH_BRK;
1283        writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1284        spin_unlock_irqrestore(&uap->port.lock, flags);
1285}
1286
1287#ifdef CONFIG_CONSOLE_POLL
1288static int pl010_get_poll_char(struct uart_port *port)
1289{
1290        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1291        unsigned int status;
1292
1293        status = readw(uap->port.membase + UART01x_FR);
1294        if (status & UART01x_FR_RXFE)
1295                return NO_POLL_CHAR;
1296
1297        return readw(uap->port.membase + UART01x_DR);
1298}
1299
1300static void pl010_put_poll_char(struct uart_port *port,
1301                         unsigned char ch)
1302{
1303        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1304
1305        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1306                barrier();
1307
1308        writew(ch, uap->port.membase + UART01x_DR);
1309}
1310
1311#endif /* CONFIG_CONSOLE_POLL */
1312
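     /*
      * Open the port: claim the pins and clock, request the IRQ, provoke
      * the TX FIFO interrupt via loopback, restore RTS/DTR from the saved
      * control register, start DMA if available and unmask the receive
      * interrupts.
      */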
1313static int pl011_startup(struct uart_port *port)
1314{
1315        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1316        unsigned int cr;
1317        int retval;
1318
 1319        /* Optionally enable pins to be muxed in and configured */
1320        if (!IS_ERR(uap->pins_default)) {
1321                retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
1322                if (retval)
1323                        dev_err(port->dev,
1324                                "could not set default pins\n");
1325        }
1326
1327        retval = clk_prepare(uap->clk);
1328        if (retval)
1329                goto out;
1330
1331        /*
1332         * Try to enable the clock producer.
1333         */
1334        retval = clk_enable(uap->clk);
1335        if (retval)
1336                goto clk_unprep;
1337
1338        uap->port.uartclk = clk_get_rate(uap->clk);
1339
1340        /* Clear pending error and receive interrupts */
1341        writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
1342               UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
1343
1344        /*
1345         * Allocate the IRQ
1346         */
1347        retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
1348        if (retval)
1349                goto clk_dis;
1350
1351        writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
1352
1353        /*
1354         * Provoke TX FIFO interrupt into asserting.
1355         */
1356        cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
1357        writew(cr, uap->port.membase + UART011_CR);
1358        writew(0, uap->port.membase + UART011_FBRD);
1359        writew(1, uap->port.membase + UART011_IBRD);
1360        writew(0, uap->port.membase + uap->lcrh_rx);
1361        if (uap->lcrh_tx != uap->lcrh_rx) {
1362                int i;
1363                /*
 1364                 * Wait 10 PCLKs before writing the LCRH_TX register;
 1365                 * to get this delay, write a read-only register 10 times.
1366                 */
1367                for (i = 0; i < 10; ++i)
1368                        writew(0xff, uap->port.membase + UART011_MIS);
1369                writew(0, uap->port.membase + uap->lcrh_tx);
1370        }
1371        writew(0, uap->port.membase + UART01x_DR);
1372        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
1373                barrier();
1374
1375        /* restore RTS and DTR */
1376        cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1377        cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1378        writew(cr, uap->port.membase + UART011_CR);
1379
1380        /*
1381         * initialise the old status of the modem signals
1382         */
1383        uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1384
1385        /* Startup DMA */
1386        pl011_dma_startup(uap);
1387
1388        /*
 1389         * Finally, enable interrupts: only the RX timeout when using DMA;
 1390         * if the initial RX DMA job failed, start in interrupt mode
 1391         * as well.
1392         */
1393        spin_lock_irq(&uap->port.lock);
1394        /* Clear out any spuriously appearing RX interrupts */
 1395        writew(UART011_RTIS | UART011_RXIS,
 1396               uap->port.membase + UART011_ICR);
1397        uap->im = UART011_RTIM;
1398        if (!pl011_dma_rx_running(uap))
1399                uap->im |= UART011_RXIM;
1400        writew(uap->im, uap->port.membase + UART011_IMSC);
1401        spin_unlock_irq(&uap->port.lock);
1402
1403        if (uap->port.dev->platform_data) {
1404                struct amba_pl011_data *plat;
1405
1406                plat = uap->port.dev->platform_data;
1407                if (plat->init)
1408                        plat->init();
1409        }
1410
1411        return 0;
1412
1413 clk_dis:
1414        clk_disable(uap->clk);
1415 clk_unprep:
1416        clk_unprepare(uap->clk);
1417 out:
1418        return retval;
1419}
1420
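     /* Disable break and the FIFOs via the given LCRH register */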
1421static void pl011_shutdown_channel(struct uart_amba_port *uap,
1422                                        unsigned int lcrh)
1423{
 1424        unsigned long val;
 1425
 1426        val = readw(uap->port.membase + lcrh);
 1427        val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
 1428        writew(val, uap->port.membase + lcrh);
1429}
1430
1431static void pl011_shutdown(struct uart_port *port)
1432{
1433        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1434        unsigned int cr;
1435        int retval;
1436
1437        /*
1438         * disable all interrupts
1439         */
1440        spin_lock_irq(&uap->port.lock);
1441        uap->im = 0;
1442        writew(uap->im, uap->port.membase + UART011_IMSC);
1443        writew(0xffff, uap->port.membase + UART011_ICR);
1444        spin_unlock_irq(&uap->port.lock);
1445
1446        pl011_dma_shutdown(uap);
1447
1448        /*
1449         * Free the interrupt
1450         */
1451        free_irq(uap->port.irq, uap);
1452
 1453        /*
 1454         * Disable the port.
 1455         * This should not disable RTS and DTR.
 1456         * Also, the RTS and DTR state should be preserved so it can
 1457         * be restored during startup().
 1458         */
1459        uap->autorts = false;
1460        cr = readw(uap->port.membase + UART011_CR);
1461        uap->old_cr = cr;
1462        cr &= UART011_CR_RTS | UART011_CR_DTR;
1463        cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1464        writew(cr, uap->port.membase + UART011_CR);
1465
1466        /*
1467         * disable break condition and fifos
1468         */
1469        pl011_shutdown_channel(uap, uap->lcrh_rx);
1470        if (uap->lcrh_rx != uap->lcrh_tx)
1471                pl011_shutdown_channel(uap, uap->lcrh_tx);
1472
1473        /*
1474         * Shut down the clock producer
1475         */
1476        clk_disable(uap->clk);
1477        clk_unprepare(uap->clk);
1478        /* Optionally let pins go into sleep states */
1479        if (!IS_ERR(uap->pins_sleep)) {
1480                retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
1481                if (retval)
1482                        dev_err(port->dev,
1483                                "could not set pins to sleep state\n");
1484        }
1485
1486
1487        if (uap->port.dev->platform_data) {
1488                struct amba_pl011_data *plat;
1489
1490                plat = uap->port.dev->platform_data;
1491                if (plat->exit)
1492                        plat->exit();
1493        }
1494
1495}
1496
1497static void
1498pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1499                     struct ktermios *old)
1500{
1501        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1502        unsigned int lcr_h, old_cr;
1503        unsigned long flags;
1504        unsigned int baud, quot, clkdiv;
1505
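        /*
         * The ST variant can oversample by 8 instead of 16, which
         * doubles the maximum usable baud rate for a given UARTCLK.
         */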
1506        if (uap->vendor->oversampling)
1507                clkdiv = 8;
1508        else
1509                clkdiv = 16;
1510
1511        /*
1512         * Ask the core to calculate the divisor for us.
1513         */
1514        baud = uart_get_baud_rate(port, termios, old, 0,
1515                                  port->uartclk / clkdiv);
1516
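        /*
         * The PL011 baud divisor is UARTCLK / (16 * baud), split into a
         * 16-bit integer part (IBRD) and a 6-bit fraction (FBRD).  quot
         * is that divisor scaled by 64, i.e. uartclk * 4 / baud (or * 8
         * when the reduced 8x oversampling rate is in use).  For example,
         * assuming a 24 MHz UARTCLK and 115200 baud, quot = 833, giving
         * IBRD = 13 and FBRD = 1.
         */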
1517        if (baud > port->uartclk/16)
1518                quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
1519        else
1520                quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
1521
1522        switch (termios->c_cflag & CSIZE) {
1523        case CS5:
1524                lcr_h = UART01x_LCRH_WLEN_5;
1525                break;
1526        case CS6:
1527                lcr_h = UART01x_LCRH_WLEN_6;
1528                break;
1529        case CS7:
1530                lcr_h = UART01x_LCRH_WLEN_7;
1531                break;
1532        default: /* CS8 */
1533                lcr_h = UART01x_LCRH_WLEN_8;
1534                break;
1535        }
1536        if (termios->c_cflag & CSTOPB)
1537                lcr_h |= UART01x_LCRH_STP2;
1538        if (termios->c_cflag & PARENB) {
1539                lcr_h |= UART01x_LCRH_PEN;
1540                if (!(termios->c_cflag & PARODD))
1541                        lcr_h |= UART01x_LCRH_EPS;
1542        }
1543        if (uap->fifosize > 1)
1544                lcr_h |= UART01x_LCRH_FEN;
1545
1546        spin_lock_irqsave(&port->lock, flags);
1547
1548        /*
1549         * Update the per-port timeout.
1550         */
1551        uart_update_timeout(port, termios->c_cflag, baud);
1552
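        /*
         * The receive error flags (OE/BE/PE/FE) sit above the 8 data
         * bits in the data register, so the low byte of the mask keeps
         * the character itself.
         */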
1553        port->read_status_mask = UART011_DR_OE | 255;
1554        if (termios->c_iflag & INPCK)
1555                port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1556        if (termios->c_iflag & (BRKINT | PARMRK))
1557                port->read_status_mask |= UART011_DR_BE;
1558
1559        /*
1560         * Characters to ignore
1561         */
1562        port->ignore_status_mask = 0;
1563        if (termios->c_iflag & IGNPAR)
1564                port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1565        if (termios->c_iflag & IGNBRK) {
1566                port->ignore_status_mask |= UART011_DR_BE;
1567                /*
1568                 * If we're ignoring parity and break indicators,
1569                 * ignore overruns too (for real raw support).
1570                 */
1571                if (termios->c_iflag & IGNPAR)
1572                        port->ignore_status_mask |= UART011_DR_OE;
1573        }
1574
1575        /*
1576         * Ignore all characters if CREAD is not set.
1577         */
1578        if ((termios->c_cflag & CREAD) == 0)
1579                port->ignore_status_mask |= UART_DUMMY_DR_RX;
1580
1581        if (UART_ENABLE_MS(port, termios->c_cflag))
1582                pl011_enable_ms(port);
1583
1584        /* first, disable everything */
1585        old_cr = readw(port->membase + UART011_CR);
1586        writew(0, port->membase + UART011_CR);
1587
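        /*
         * Hardware flow control: always enable CTS, but only enable
         * auto-RTS if RTS is currently asserted (i.e. the port has not
         * been throttled by the core).
         */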
1588        if (termios->c_cflag & CRTSCTS) {
1589                if (old_cr & UART011_CR_RTS)
1590                        old_cr |= UART011_CR_RTSEN;
1591
1592                old_cr |= UART011_CR_CTSEN;
1593                uap->autorts = true;
1594        } else {
1595                old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
1596                uap->autorts = false;
1597        }
1598
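        /*
         * On oversampling-capable (ST) ports, switch to 8x oversampling
         * when the requested rate is too fast for the usual 16x clock.
         */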
1599        if (uap->vendor->oversampling) {
1600                if (baud > port->uartclk / 16)
1601                        old_cr |= ST_UART011_CR_OVSFACT;
1602                else
1603                        old_cr &= ~ST_UART011_CR_OVSFACT;
1604        }
1605
1606        /* Set baud rate */
1607        writew(quot & 0x3f, port->membase + UART011_FBRD);
1608        writew(quot >> 6, port->membase + UART011_IBRD);
1609
1610        /*
1611         * ----------v----------v----------v----------v-----
1612         * NOTE: MUST BE WRITTEN AFTER UARTIBRD & UARTFBRD
1613         * ----------^----------^----------^----------^-----
1614         */
1615        writew(lcr_h, port->membase + uap->lcrh_rx);
1616        if (uap->lcrh_rx != uap->lcrh_tx) {
1617                int i;
1618                /*
1619                 * Wait 10 PCLKs before writing LCRH_TX register,
1620                 * to get this delay write read only register 10 times
1621                 */
1622                for (i = 0; i < 10; ++i)
1623                        writew(0xff, uap->port.membase + UART011_MIS);
1624                writew(lcr_h, port->membase + uap->lcrh_tx);
1625        }
1626        writew(old_cr, port->membase + UART011_CR);
1627
1628        spin_unlock_irqrestore(&port->lock, flags);
1629}
1630
1631static const char *pl011_type(struct uart_port *port)
1632{
1633        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1634        return uap->port.type == PORT_AMBA ? uap->type : NULL;
1635}
1636
1637/*
1638 * Release the memory region(s) being used by 'port'
1639 */
1640static void pl010_release_port(struct uart_port *port)
1641{
1642        release_mem_region(port->mapbase, SZ_4K);
1643}
1644
1645/*
1646 * Request the memory region(s) being used by 'port'
1647 */
1648static int pl010_request_port(struct uart_port *port)
1649{
1650        return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1651                        != NULL ? 0 : -EBUSY;
1652}
1653
1654/*
1655 * Configure/autoconfigure the port.
1656 */
1657static void pl010_config_port(struct uart_port *port, int flags)
1658{
1659        if (flags & UART_CONFIG_TYPE) {
1660                port->type = PORT_AMBA;
1661                pl010_request_port(port);
1662        }
1663}
1664
1665/*
1666 * verify the new serial_struct (for TIOCSSERIAL).
1667 */
1668static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
1669{
1670        int ret = 0;
1671        if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
1672                ret = -EINVAL;
1673        if (ser->irq < 0 || ser->irq >= nr_irqs)
1674                ret = -EINVAL;
1675        if (ser->baud_base < 9600)
1676                ret = -EINVAL;
1677        return ret;
1678}
1679
1680static struct uart_ops amba_pl011_pops = {
1681        .tx_empty       = pl01x_tx_empty,
1682        .set_mctrl      = pl011_set_mctrl,
1683        .get_mctrl      = pl01x_get_mctrl,
1684        .stop_tx        = pl011_stop_tx,
1685        .start_tx       = pl011_start_tx,
1686        .stop_rx        = pl011_stop_rx,
1687        .enable_ms      = pl011_enable_ms,
1688        .break_ctl      = pl011_break_ctl,
1689        .startup        = pl011_startup,
1690        .shutdown       = pl011_shutdown,
1691        .flush_buffer   = pl011_dma_flush_buffer,
1692        .set_termios    = pl011_set_termios,
1693        .type           = pl011_type,
1694        .release_port   = pl010_release_port,
1695        .request_port   = pl010_request_port,
1696        .config_port    = pl010_config_port,
1697        .verify_port    = pl010_verify_port,
1698#ifdef CONFIG_CONSOLE_POLL
1699        .poll_get_char = pl010_get_poll_char,
1700        .poll_put_char = pl010_put_poll_char,
1701#endif
1702};
1703
1704static struct uart_amba_port *amba_ports[UART_NR];
1705
1706#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
1707
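/*
 * Busy-wait until there is room in the TX FIFO, then write the character.
 */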
1708static void pl011_console_putchar(struct uart_port *port, int ch)
1709{
1710        struct uart_amba_port *uap = (struct uart_amba_port *)port;
1711
1712        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1713                barrier();
1714        writew(ch, uap->port.membase + UART01x_DR);
1715}
1716
1717static void
1718pl011_console_write(struct console *co, const char *s, unsigned int count)
1719{
1720        struct uart_amba_port *uap = amba_ports[co->index];
1721        unsigned int status, old_cr, new_cr;
1722        unsigned long flags;
1723        int locked = 1;
1724
1725        clk_enable(uap->clk);
1726
1727        local_irq_save(flags);
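        /*
         * Take the port lock unless we were re-entered from the sysrq
         * handler (which already holds it); during an oops only try to
         * take it, so console output cannot deadlock.
         */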
1728        if (uap->port.sysrq)
1729                locked = 0;
1730        else if (oops_in_progress)
1731                locked = spin_trylock(&uap->port.lock);
1732        else
1733                spin_lock(&uap->port.lock);
1734
1735        /*
1736         *      First save the CR, then force the UART and TX on
1737         *      with CTS flow control disabled
1737         */
1738        old_cr = readw(uap->port.membase + UART011_CR);
1739        new_cr = old_cr & ~UART011_CR_CTSEN;
1740        new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1741        writew(new_cr, uap->port.membase + UART011_CR);
1742
1743        uart_console_write(&uap->port, s, count, pl011_console_putchar);
1744
1745        /*
1746         *      Finally, wait for transmitter to become empty
1747         *      and restore the CR
1748         */
1749        do {
1750                status = readw(uap->port.membase + UART01x_FR);
1751        } while (status & UART01x_FR_BUSY);
1752        writew(old_cr, uap->port.membase + UART011_CR);
1753
1754        if (locked)
1755                spin_unlock(&uap->port.lock);
1756        local_irq_restore(flags);
1757
1758        clk_disable(uap->clk);
1759}
1760
1761static void __init
1762pl011_console_get_options(struct uart_amba_port *uap, int *baud,
1763                             int *parity, int *bits)
1764{
1765        if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
1766                unsigned int lcr_h, ibrd, fbrd;
1767
1768                lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1769
1770                *parity = 'n';
1771                if (lcr_h & UART01x_LCRH_PEN) {
1772                        if (lcr_h & UART01x_LCRH_EPS)
1773                                *parity = 'e';
1774                        else
1775                                *parity = 'o';
1776                }
1777
1778                if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
1779                        *bits = 7;
1780                else
1781                        *bits = 8;
1782
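                /*
                 * Invert the divisor maths: baud = uartclk / (16 *
                 * (ibrd + fbrd/64)) = uartclk * 4 / (64 * ibrd + fbrd).
                 */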
1783                ibrd = readw(uap->port.membase + UART011_IBRD);
1784                fbrd = readw(uap->port.membase + UART011_FBRD);
1785
1786                *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
1787
1788                if (uap->vendor->oversampling) {
1789                        if (readw(uap->port.membase + UART011_CR)
1790                                  & ST_UART011_CR_OVSFACT)
1791                                *baud *= 2;
1792                }
1793        }
1794}
1795
1796static int __init pl011_console_setup(struct console *co, char *options)
1797{
1798        struct uart_amba_port *uap;
1799        int baud = 38400;
1800        int bits = 8;
1801        int parity = 'n';
1802        int flow = 'n';
1803        int ret;
1804
1805        /*
1806         * Check whether an invalid uart number has been specified;
1807         * if so, fall back to the first port, and bail out if no
1808         * port has been registered at that index.
1809         */
1810        if (co->index >= UART_NR)
1811                co->index = 0;
1812        uap = amba_ports[co->index];
1813        if (!uap)
1814                return -ENODEV;
1815
1816        /* Allow pins to be muxed in and configured */
1817        if (!IS_ERR(uap->pins_default)) {
1818                ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
1819                if (ret)
1820                        dev_err(uap->port.dev,
1821                                "could not set default pins\n");
1822        }
1823
1824        ret = clk_prepare(uap->clk);
1825        if (ret)
1826                return ret;
1827
1828        if (uap->port.dev->platform_data) {
1829                struct amba_pl011_data *plat;
1830
1831                plat = uap->port.dev->platform_data;
1832                if (plat->init)
1833                        plat->init();
1834        }
1835
1836        uap->port.uartclk = clk_get_rate(uap->clk);
1837
1838        if (options)
1839                uart_parse_options(options, &baud, &parity, &bits, &flow);
1840        else
1841                pl011_console_get_options(uap, &baud, &parity, &bits);
1842
1843        return uart_set_options(&uap->port, co, baud, parity, bits, flow);
1844}
1845
1846static struct uart_driver amba_reg;
1847static struct console amba_console = {
1848        .name           = "ttyAMA",
1849        .write          = pl011_console_write,
1850        .device         = uart_console_device,
1851        .setup          = pl011_console_setup,
1852        .flags          = CON_PRINTBUFFER,
1853        .index          = -1,
1854        .data           = &amba_reg,
1855};
1856
1857#define AMBA_CONSOLE    (&amba_console)
1858#else
1859#define AMBA_CONSOLE    NULL
1860#endif
1861
1862static struct uart_driver amba_reg = {
1863        .owner                  = THIS_MODULE,
1864        .driver_name            = "ttyAMA",
1865        .dev_name               = "ttyAMA",
1866        .major                  = SERIAL_AMBA_MAJOR,
1867        .minor                  = SERIAL_AMBA_MINOR,
1868        .nr                     = UART_NR,
1869        .cons                   = AMBA_CONSOLE,
1870};
1871
1872static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1873{
1874        struct uart_amba_port *uap;
1875        struct vendor_data *vendor = id->data;
1876        void __iomem *base;
1877        int i, ret;
1878
1879        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
1880                if (amba_ports[i] == NULL)
1881                        break;
1882
1883        if (i == ARRAY_SIZE(amba_ports)) {
1884                ret = -EBUSY;
1885                goto out;
1886        }
1887
1888        uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
1889        if (uap == NULL) {
1890                ret = -ENOMEM;
1891                goto out;
1892        }
1893
1894        base = ioremap(dev->res.start, resource_size(&dev->res));
1895        if (!base) {
1896                ret = -ENOMEM;
1897                goto free;
1898        }
1899
1900        uap->pinctrl = devm_pinctrl_get(&dev->dev);
1901        if (IS_ERR(uap->pinctrl)) {
1902                ret = PTR_ERR(uap->pinctrl);
1903                goto unmap;
1904        }
1905        uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
1906                                                 PINCTRL_STATE_DEFAULT);
1907        if (IS_ERR(uap->pins_default))
1908                dev_err(&dev->dev, "could not get default pinstate\n");
1909
1910        uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
1911                                               PINCTRL_STATE_SLEEP);
1912        if (IS_ERR(uap->pins_sleep))
1913                dev_dbg(&dev->dev, "could not get sleep pinstate\n");
1914
1915        uap->clk = clk_get(&dev->dev, NULL);
1916        if (IS_ERR(uap->clk)) {
1917                ret = PTR_ERR(uap->clk);
1918                goto unmap;
1919        }
1920
1921        uap->vendor = vendor;
1922        uap->lcrh_rx = vendor->lcrh_rx;
1923        uap->lcrh_tx = vendor->lcrh_tx;
1924        uap->old_cr = 0;
1925        uap->fifosize = vendor->fifosize;
1926        uap->interrupt_may_hang = vendor->interrupt_may_hang;
1927        uap->port.dev = &dev->dev;
1928        uap->port.mapbase = dev->res.start;
1929        uap->port.membase = base;
1930        uap->port.iotype = UPIO_MEM;
1931        uap->port.irq = dev->irq[0];
1932        uap->port.fifosize = uap->fifosize;
1933        uap->port.ops = &amba_pl011_pops;
1934        uap->port.flags = UPF_BOOT_AUTOCONF;
1935        uap->port.line = i;
1936        pl011_dma_probe(uap);
1937
1938        /* Ensure interrupts from this UART are masked and cleared */
1939        writew(0, uap->port.membase + UART011_IMSC);
1940        writew(0xffff, uap->port.membase + UART011_ICR);
1941
1942        snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
1943
1944        amba_ports[i] = uap;
1945
1946        amba_set_drvdata(dev, uap);
1947        ret = uart_add_one_port(&amba_reg, &uap->port);
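        /*
         * On failure, unwind in reverse order; the unmap and free
         * labels below double as the targets of the earlier error
         * gotos, so only the steps already completed are undone.
         */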
1948        if (ret) {
1949                amba_set_drvdata(dev, NULL);
1950                amba_ports[i] = NULL;
1951                pl011_dma_remove(uap);
1952                clk_put(uap->clk);
1953 unmap:
1954                iounmap(base);
1955 free:
1956                kfree(uap);
1957        }
1958 out:
1959        return ret;
1960}
1961
1962static int pl011_remove(struct amba_device *dev)
1963{
1964        struct uart_amba_port *uap = amba_get_drvdata(dev);
1965        int i;
1966
1967        amba_set_drvdata(dev, NULL);
1968
1969        uart_remove_one_port(&amba_reg, &uap->port);
1970
1971        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
1972                if (amba_ports[i] == uap)
1973                        amba_ports[i] = NULL;
1974
1975        pl011_dma_remove(uap);
1976        iounmap(uap->port.membase);
1977        clk_put(uap->clk);
1978        kfree(uap);
1979        return 0;
1980}
1981
1982#ifdef CONFIG_PM
1983static int pl011_suspend(struct amba_device *dev, pm_message_t state)
1984{
1985        struct uart_amba_port *uap = amba_get_drvdata(dev);
1986
1987        if (!uap)
1988                return -EINVAL;
1989
1990        return uart_suspend_port(&amba_reg, &uap->port);
1991}
1992
1993static int pl011_resume(struct amba_device *dev)
1994{
1995        struct uart_amba_port *uap = amba_get_drvdata(dev);
1996
1997        if (!uap)
1998                return -EINVAL;
1999
2000        return uart_resume_port(&amba_reg, &uap->port);
2001}
2002#endif
2003
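/*
 * AMBA/PrimeCell peripheral IDs: the standard ARM PL011 and the
 * ST-Ericsson derivative, each matched to its vendor_data above.
 */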
2004static struct amba_id pl011_ids[] = {
2005        {
2006                .id     = 0x00041011,
2007                .mask   = 0x000fffff,
2008                .data   = &vendor_arm,
2009        },
2010        {
2011                .id     = 0x00380802,
2012                .mask   = 0x00ffffff,
2013                .data   = &vendor_st,
2014        },
2015        { 0, 0 },
2016};
2017
2018MODULE_DEVICE_TABLE(amba, pl011_ids);
2019
2020static struct amba_driver pl011_driver = {
2021        .drv = {
2022                .name   = "uart-pl011",
2023        },
2024        .id_table       = pl011_ids,
2025        .probe          = pl011_probe,
2026        .remove         = pl011_remove,
2027#ifdef CONFIG_PM
2028        .suspend        = pl011_suspend,
2029        .resume         = pl011_resume,
2030#endif
2031};
2032
2033static int __init pl011_init(void)
2034{
2035        int ret;
2036        printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2037
2038        ret = uart_register_driver(&amba_reg);
2039        if (ret == 0) {
2040                ret = amba_driver_register(&pl011_driver);
2041                if (ret)
2042                        uart_unregister_driver(&amba_reg);
2043        }
2044        return ret;
2045}
2046
2047static void __exit pl011_exit(void)
2048{
2049        amba_driver_unregister(&pl011_driver);
2050        uart_unregister_driver(&amba_reg);
2051}
2052
2053/*
2054 * While this can be a module, if built in it is most likely the console,
2055 * so keep module_exit() but move module_init() to an earlier initcall level.
2056 */
2057arch_initcall(pl011_init);
2058module_exit(pl011_exit);
2059
2060MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2061MODULE_DESCRIPTION("ARM AMBA serial port driver");
2062MODULE_LICENSE("GPL");
2063