/* linux/drivers/tty/serial/imx.c */
   1/*
   2 *  Driver for Motorola IMX serial ports
   3 *
   4 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
   5 *
   6 *  Author: Sascha Hauer <sascha@saschahauer.de>
   7 *  Copyright (C) 2004 Pengutronix
   8 *
   9 *  Copyright (C) 2009 emlix GmbH
  10 *  Author: Fabian Godehardt (added IrDA support for iMX)
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; if not, write to the Free Software
  24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  25 *
  26 * [29-Mar-2005] Mike Lee
  27 * Added hardware handshake
  28 */
  29
  30#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
  31#define SUPPORT_SYSRQ
  32#endif
  33
  34#include <linux/module.h>
  35#include <linux/ioport.h>
  36#include <linux/init.h>
  37#include <linux/console.h>
  38#include <linux/sysrq.h>
  39#include <linux/platform_device.h>
  40#include <linux/tty.h>
  41#include <linux/tty_flip.h>
  42#include <linux/serial_core.h>
  43#include <linux/serial.h>
  44#include <linux/clk.h>
  45#include <linux/delay.h>
  46#include <linux/rational.h>
  47#include <linux/slab.h>
  48#include <linux/of.h>
  49#include <linux/of_device.h>
  50#include <linux/io.h>
  51#include <linux/dma-mapping.h>
  52
  53#include <asm/irq.h>
  54#include <linux/platform_data/serial-imx.h>
  55#include <linux/platform_data/dma-imx.h>
  56
  57/* Register definitions */
  58#define URXD0 0x0  /* Receiver Register */
  59#define URTX0 0x40 /* Transmitter Register */
  60#define UCR1  0x80 /* Control Register 1 */
  61#define UCR2  0x84 /* Control Register 2 */
  62#define UCR3  0x88 /* Control Register 3 */
  63#define UCR4  0x8c /* Control Register 4 */
  64#define UFCR  0x90 /* FIFO Control Register */
  65#define USR1  0x94 /* Status Register 1 */
  66#define USR2  0x98 /* Status Register 2 */
  67#define UESC  0x9c /* Escape Character Register */
  68#define UTIM  0xa0 /* Escape Timer Register */
  69#define UBIR  0xa4 /* BRM Incremental Register */
  70#define UBMR  0xa8 /* BRM Modulator Register */
  71#define UBRC  0xac /* Baud Rate Count Register */
  72#define IMX21_ONEMS 0xb0 /* One Millisecond register */
  73#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
  74#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
  75
  76/* UART Control Register Bit Fields.*/
  77#define URXD_CHARRDY    (1<<15)
  78#define URXD_ERR        (1<<14)
  79#define URXD_OVRRUN     (1<<13)
  80#define URXD_FRMERR     (1<<12)
  81#define URXD_BRK        (1<<11)
  82#define URXD_PRERR      (1<<10)
  83#define UCR1_ADEN       (1<<15) /* Auto detect interrupt */
  84#define UCR1_ADBR       (1<<14) /* Auto detect baud rate */
  85#define UCR1_TRDYEN     (1<<13) /* Transmitter ready interrupt enable */
  86#define UCR1_IDEN       (1<<12) /* Idle condition interrupt */
  87#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
  88#define UCR1_RRDYEN     (1<<9)  /* Recv ready interrupt enable */
  89#define UCR1_RDMAEN     (1<<8)  /* Recv ready DMA enable */
  90#define UCR1_IREN       (1<<7)  /* Infrared interface enable */
  91#define UCR1_TXMPTYEN   (1<<6)  /* Transimitter empty interrupt enable */
  92#define UCR1_RTSDEN     (1<<5)  /* RTS delta interrupt enable */
  93#define UCR1_SNDBRK     (1<<4)  /* Send break */
  94#define UCR1_TDMAEN     (1<<3)  /* Transmitter ready DMA enable */
  95#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
  96#define UCR1_ATDMAEN    (1<<2)  /* Aging DMA Timer Enable */
  97#define UCR1_DOZE       (1<<1)  /* Doze */
  98#define UCR1_UARTEN     (1<<0)  /* UART enabled */
  99#define UCR2_ESCI       (1<<15) /* Escape seq interrupt enable */
 100#define UCR2_IRTS       (1<<14) /* Ignore RTS pin */
 101#define UCR2_CTSC       (1<<13) /* CTS pin control */
 102#define UCR2_CTS        (1<<12) /* Clear to send */
 103#define UCR2_ESCEN      (1<<11) /* Escape enable */
 104#define UCR2_PREN       (1<<8)  /* Parity enable */
 105#define UCR2_PROE       (1<<7)  /* Parity odd/even */
 106#define UCR2_STPB       (1<<6)  /* Stop */
 107#define UCR2_WS         (1<<5)  /* Word size */
 108#define UCR2_RTSEN      (1<<4)  /* Request to send interrupt enable */
 109#define UCR2_ATEN       (1<<3)  /* Aging Timer Enable */
 110#define UCR2_TXEN       (1<<2)  /* Transmitter enabled */
 111#define UCR2_RXEN       (1<<1)  /* Receiver enabled */
 112#define UCR2_SRST       (1<<0)  /* SW reset */
 113#define UCR3_DTREN      (1<<13) /* DTR interrupt enable */
 114#define UCR3_PARERREN   (1<<12) /* Parity enable */
 115#define UCR3_FRAERREN   (1<<11) /* Frame error interrupt enable */
 116#define UCR3_DSR        (1<<10) /* Data set ready */
 117#define UCR3_DCD        (1<<9)  /* Data carrier detect */
 118#define UCR3_RI         (1<<8)  /* Ring indicator */
 119#define UCR3_TIMEOUTEN  (1<<7)  /* Timeout interrupt enable */
 120#define UCR3_RXDSEN     (1<<6)  /* Receive status interrupt enable */
 121#define UCR3_AIRINTEN   (1<<5)  /* Async IR wake interrupt enable */
 122#define UCR3_AWAKEN     (1<<4)  /* Async wake interrupt enable */
 123#define IMX21_UCR3_RXDMUXSEL    (1<<2)  /* RXD Muxed Input Select */
 124#define UCR3_INVT       (1<<1)  /* Inverted Infrared transmission */
 125#define UCR3_BPEN       (1<<0)  /* Preset registers enable */
 126#define UCR4_CTSTL_SHF  10      /* CTS trigger level shift */
 127#define UCR4_CTSTL_MASK 0x3F    /* CTS trigger is 6 bits wide */
 128#define UCR4_INVR       (1<<9)  /* Inverted infrared reception */
 129#define UCR4_ENIRI      (1<<8)  /* Serial infrared interrupt enable */
 130#define UCR4_WKEN       (1<<7)  /* Wake interrupt enable */
 131#define UCR4_REF16      (1<<6)  /* Ref freq 16 MHz */
 132#define UCR4_IDDMAEN    (1<<6)  /* DMA IDLE Condition Detected */
 133#define UCR4_IRSC       (1<<5)  /* IR special case */
 134#define UCR4_TCEN       (1<<3)  /* Transmit complete interrupt enable */
 135#define UCR4_BKEN       (1<<2)  /* Break condition interrupt enable */
 136#define UCR4_OREN       (1<<1)  /* Receiver overrun interrupt enable */
 137#define UCR4_DREN       (1<<0)  /* Recv data ready interrupt enable */
 138#define UFCR_RXTL_SHF   0       /* Receiver trigger level shift */
 139#define UFCR_DCEDTE     (1<<6)  /* DCE/DTE mode select */
 140#define UFCR_RFDIV      (7<<7)  /* Reference freq divider mask */
 141#define UFCR_RFDIV_REG(x)       (((x) < 7 ? 6 - (x) : 6) << 7)
 142#define UFCR_TXTL_SHF   10      /* Transmitter trigger level shift */
 143#define USR1_PARITYERR  (1<<15) /* Parity error interrupt flag */
 144#define USR1_RTSS       (1<<14) /* RTS pin status */
 145#define USR1_TRDY       (1<<13) /* Transmitter ready interrupt/dma flag */
 146#define USR1_RTSD       (1<<12) /* RTS delta */
 147#define USR1_ESCF       (1<<11) /* Escape seq interrupt flag */
 148#define USR1_FRAMERR    (1<<10) /* Frame error interrupt flag */
 149#define USR1_RRDY       (1<<9)   /* Receiver ready interrupt/dma flag */
 150#define USR1_TIMEOUT    (1<<7)   /* Receive timeout interrupt status */
 151#define USR1_RXDS        (1<<6)  /* Receiver idle interrupt flag */
 152#define USR1_AIRINT      (1<<5)  /* Async IR wake interrupt flag */
 153#define USR1_AWAKE       (1<<4)  /* Aysnc wake interrupt flag */
 154#define USR2_ADET        (1<<15) /* Auto baud rate detect complete */
 155#define USR2_TXFE        (1<<14) /* Transmit buffer FIFO empty */
 156#define USR2_DTRF        (1<<13) /* DTR edge interrupt flag */
 157#define USR2_IDLE        (1<<12) /* Idle condition */
 158#define USR2_IRINT       (1<<8)  /* Serial infrared interrupt flag */
 159#define USR2_WAKE        (1<<7)  /* Wake */
 160#define USR2_RTSF        (1<<4)  /* RTS edge interrupt flag */
 161#define USR2_TXDC        (1<<3)  /* Transmitter complete */
 162#define USR2_BRCD        (1<<2)  /* Break condition */
 163#define USR2_ORE        (1<<1)   /* Overrun error */
 164#define USR2_RDR        (1<<0)   /* Recv data ready */
 165#define UTS_FRCPERR     (1<<13) /* Force parity error */
 166#define UTS_LOOP        (1<<12)  /* Loop tx and rx */
 167#define UTS_TXEMPTY      (1<<6)  /* TxFIFO empty */
 168#define UTS_RXEMPTY      (1<<5)  /* RxFIFO empty */
 169#define UTS_TXFULL       (1<<4)  /* TxFIFO full */
 170#define UTS_RXFULL       (1<<3)  /* RxFIFO full */
 171#define UTS_SOFTRST      (1<<0)  /* Software reset */
 172
 173/* We've been assigned a range on the "Low-density serial ports" major */
 174#define SERIAL_IMX_MAJOR        207
 175#define MINOR_START             16
 176#define DEV_NAME                "ttymxc"
 177
 178/*
 179 * This determines how often we check the modem status signals
 180 * for any change.  They generally aren't connected to an IRQ
 181 * so we have to poll them.  We also check immediately before
 182 * filling the TX fifo incase CTS has been dropped.
 183 */
 184#define MCTRL_TIMEOUT   (250*HZ/1000)
 185
 186#define DRIVER_NAME "IMX-uart"
 187
 188#define UART_NR 8
 189
/* i.mx21 type uart runs on all i.mx except i.mx1 */
enum imx_uart_type {
        IMX1_UART,      /* original i.MX1 UART block (different UTS offset) */
        IMX21_UART,     /* i.MX21 and later */
        IMX6Q_UART,     /* i.MX6Q; shares the i.MX21 UTS offset */
};

/* device type dependent stuff */
struct imx_uart_data {
        unsigned uts_reg;               /* offset of the UART Test Register */
        enum imx_uart_type devtype;
};
 202
struct imx_port {
        struct uart_port        port;           /* must stay first: code casts uart_port* to imx_port* */
        struct timer_list       timer;          /* modem-status poll timer (see imx_timeout) */
        unsigned int            old_status;     /* last mctrl snapshot, for delta detection */
        int                     txirq, rxirq, rtsirq;
        unsigned int            have_rtscts:1;  /* RTS/CTS lines usable for flow control */
        unsigned int            dte_mode:1;     /* DTE (vs DCE) pin mode */
        unsigned int            use_irda:1;     /* IrDA transceiver attached (CONFIG_IRDA) */
        unsigned int            irda_inv_rx:1;  /* invert IR receive polarity */
        unsigned int            irda_inv_tx:1;  /* invert IR transmit polarity */
        unsigned short          trcv_delay; /* transceiver delay */
        struct clk              *clk_ipg;
        struct clk              *clk_per;
        const struct imx_uart_data *devdata;    /* per-SoC register offsets/type */

        /* DMA fields */
        unsigned int            dma_is_inited:1;
        unsigned int            dma_is_enabled:1;
        unsigned int            dma_is_rxing:1; /* an RX DMA transfer is in flight */
        unsigned int            dma_is_txing:1; /* a TX DMA transfer is in flight */
        struct dma_chan         *dma_chan_rx, *dma_chan_tx;
        struct scatterlist      rx_sgl, tx_sgl[2];
        void                    *rx_buf;
        unsigned int            tx_bytes;       /* byte count of the current TX DMA */
        unsigned int            dma_tx_nents;   /* scatterlist entries used in tx_sgl */
        wait_queue_head_t       dma_wait;       /* completion callbacks wake waiters here */
};
 230
/* Snapshot of UCR1-UCR3, used by imx_port_ucrs_save()/_restore(). */
struct imx_port_ucrs {
        unsigned int    ucr1;
        unsigned int    ucr2;
        unsigned int    ucr3;
};
 236
/*
 * With CONFIG_IRDA unset, USE_IRDA() is constant 0 so the half-duplex
 * IrDA paths in the TX start/stop code are compiled out.
 */
#ifdef CONFIG_IRDA
#define USE_IRDA(sport) ((sport)->use_irda)
#else
#define USE_IRDA(sport) (0)
#endif
 242
 243static struct imx_uart_data imx_uart_devdata[] = {
 244        [IMX1_UART] = {
 245                .uts_reg = IMX1_UTS,
 246                .devtype = IMX1_UART,
 247        },
 248        [IMX21_UART] = {
 249                .uts_reg = IMX21_UTS,
 250                .devtype = IMX21_UART,
 251        },
 252        [IMX6Q_UART] = {
 253                .uts_reg = IMX21_UTS,
 254                .devtype = IMX6Q_UART,
 255        },
 256};
 257
 258static struct platform_device_id imx_uart_devtype[] = {
 259        {
 260                .name = "imx1-uart",
 261                .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
 262        }, {
 263                .name = "imx21-uart",
 264                .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
 265        }, {
 266                .name = "imx6q-uart",
 267                .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
 268        }, {
 269                /* sentinel */
 270        }
 271};
 272MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
 273
 274static struct of_device_id imx_uart_dt_ids[] = {
 275        { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
 276        { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
 277        { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
 278        { /* sentinel */ }
 279};
 280MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
 281
 282static inline unsigned uts_reg(struct imx_port *sport)
 283{
 284        return sport->devdata->uts_reg;
 285}
 286
 287static inline int is_imx1_uart(struct imx_port *sport)
 288{
 289        return sport->devdata->devtype == IMX1_UART;
 290}
 291
 292static inline int is_imx21_uart(struct imx_port *sport)
 293{
 294        return sport->devdata->devtype == IMX21_UART;
 295}
 296
 297static inline int is_imx6q_uart(struct imx_port *sport)
 298{
 299        return sport->devdata->devtype == IMX6Q_UART;
 300}
 301/*
 302 * Save and restore functions for UCR1, UCR2 and UCR3 registers
 303 */
 304#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_IMX_CONSOLE)
 305static void imx_port_ucrs_save(struct uart_port *port,
 306                               struct imx_port_ucrs *ucr)
 307{
 308        /* save control registers */
 309        ucr->ucr1 = readl(port->membase + UCR1);
 310        ucr->ucr2 = readl(port->membase + UCR2);
 311        ucr->ucr3 = readl(port->membase + UCR3);
 312}
 313
 314static void imx_port_ucrs_restore(struct uart_port *port,
 315                                  struct imx_port_ucrs *ucr)
 316{
 317        /* restore control registers */
 318        writel(ucr->ucr1, port->membase + UCR1);
 319        writel(ucr->ucr2, port->membase + UCR2);
 320        writel(ucr->ucr3, port->membase + UCR3);
 321}
 322#endif
 323
/*
 * Handle any change of modem status signal since we were last called.
 */
static void imx_mctrl_check(struct imx_port *sport)
{
        unsigned int status, changed;

        /* XOR current lines against the previous snapshot to find edges. */
        status = sport->port.ops->get_mctrl(&sport->port);
        changed = status ^ sport->old_status;

        if (changed == 0)
                return;

        sport->old_status = status;

        /* Count each delta; DCD/CTS edges are handed to the serial core. */
        if (changed & TIOCM_RI)
                sport->port.icount.rng++;
        if (changed & TIOCM_DSR)
                sport->port.icount.dsr++;
        if (changed & TIOCM_CAR)
                uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
        if (changed & TIOCM_CTS)
                uart_handle_cts_change(&sport->port, status & TIOCM_CTS);

        /* Wake processes sleeping in TIOCMIWAIT on a modem-line change. */
        wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
 350
/*
 * This is our per-port timeout handler, for checking the
 * modem status signals.
 */
static void imx_timeout(unsigned long data)
{
        struct imx_port *sport = (struct imx_port *)data;
        unsigned long flags;

        /* Only poll while the port is open (state set); otherwise the
         * timer simply expires and is not re-armed. */
        if (sport->port.state) {
                spin_lock_irqsave(&sport->port.lock, flags);
                imx_mctrl_check(sport);
                spin_unlock_irqrestore(&sport->port.lock, flags);

                /* re-arm for the next poll interval */
                mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
        }
}
 368
/*
 * interrupts disabled on entry
 */
static void imx_stop_tx(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long temp;

        if (USE_IRDA(sport)) {
                /* half duplex - wait for end of transmission */
                int n = 256;
                while ((--n > 0) &&
                      !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
                        udelay(5);
                        barrier();
                }
                /*
                 * irda transceiver - wait a bit more to avoid
                 * cutoff, hardware dependent
                 */
                udelay(sport->trcv_delay);

                /*
                 * half duplex - reactivate receive mode,
                 * flush receive pipe echo crap
                 */
                if (readl(sport->port.membase + USR2) & USR2_TXDC) {
                        /* mask TX-empty and TX-ready interrupts */
                        temp = readl(sport->port.membase + UCR1);
                        temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
                        writel(temp, sport->port.membase + UCR1);

                        /* mask the transmit-complete interrupt too */
                        temp = readl(sport->port.membase + UCR4);
                        temp &= ~(UCR4_TCEN);
                        writel(temp, sport->port.membase + UCR4);

                        /* drain characters echoed back into the RX FIFO */
                        while (readl(sport->port.membase + URXD0) &
                               URXD_CHARRDY)
                                barrier();

                        /* re-enable receiver-ready interrupts */
                        temp = readl(sport->port.membase + UCR1);
                        temp |= UCR1_RRDYEN;
                        writel(temp, sport->port.membase + UCR1);

                        temp = readl(sport->port.membase + UCR4);
                        temp |= UCR4_DREN;
                        writel(temp, sport->port.membase + UCR4);
                }
                return;
        }

        /*
         * We are maybe in the SMP context, so if the DMA TX thread is running
         * on other cpu, we have to wait for it to finish.
         */
        if (sport->dma_is_enabled && sport->dma_is_txing)
                return;

        /* PIO mode: just mask the transmitter-empty interrupt */
        temp = readl(sport->port.membase + UCR1);
        writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}
 429
/*
 * interrupts disabled on entry
 */
static void imx_stop_rx(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long temp;

        /*
         * We are maybe in the SMP context, so if the DMA RX transfer is
         * running on another cpu, leave the receiver alone until it is done.
         */
        if (sport->dma_is_enabled && sport->dma_is_rxing)
                return;

        /* disable the receiver */
        temp = readl(sport->port.membase + UCR2);
        writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
}
 448
 449/*
 450 * Set the modem control timer to fire immediately.
 451 */
 452static void imx_enable_ms(struct uart_port *port)
 453{
 454        struct imx_port *sport = (struct imx_port *)port;
 455
 456        mod_timer(&sport->timer, jiffies);
 457}
 458
 459static inline void imx_transmit_buffer(struct imx_port *sport)
 460{
 461        struct circ_buf *xmit = &sport->port.state->xmit;
 462
 463        while (!uart_circ_empty(xmit) &&
 464                        !(readl(sport->port.membase + uts_reg(sport))
 465                                & UTS_TXFULL)) {
 466                /* send xmit->buf[xmit->tail]
 467                 * out the port here */
 468                writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
 469                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 470                sport->port.icount.tx++;
 471        }
 472
 473        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 474                uart_write_wakeup(&sport->port);
 475
 476        if (uart_circ_empty(xmit))
 477                imx_stop_tx(&sport->port);
 478}
 479
/*
 * Completion callback for a TX DMA transfer: unmap the scatterlist,
 * advance the circular buffer past the transmitted bytes, and wake
 * writers and (if any) a waiter blocked on dma_wait.
 */
static void dma_tx_callback(void *data)
{
        struct imx_port *sport = data;
        struct scatterlist *sgl = &sport->tx_sgl[0];
        struct circ_buf *xmit = &sport->port.state->xmit;
        unsigned long flags;

        dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

        sport->dma_is_txing = 0;

        /* update the stat */
        spin_lock_irqsave(&sport->port.lock, flags);
        xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
        sport->port.icount.tx += sport->tx_bytes;
        spin_unlock_irqrestore(&sport->port.lock, flags);

        dev_dbg(sport->port.dev, "we finish the TX DMA.\n");

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&sport->port);

        /* the shutdown path may be sleeping on dma_wait for us to finish */
        if (waitqueue_active(&sport->dma_wait)) {
                wake_up(&sport->dma_wait);
                dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
                return;
        }
}
 508
 509static void imx_dma_tx(struct imx_port *sport)
 510{
 511        struct circ_buf *xmit = &sport->port.state->xmit;
 512        struct scatterlist *sgl = sport->tx_sgl;
 513        struct dma_async_tx_descriptor *desc;
 514        struct dma_chan *chan = sport->dma_chan_tx;
 515        struct device *dev = sport->port.dev;
 516        enum dma_status status;
 517        int ret;
 518
 519        status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
 520        if (DMA_IN_PROGRESS == status)
 521                return;
 522
 523        sport->tx_bytes = uart_circ_chars_pending(xmit);
 524
 525        if (xmit->tail > xmit->head && xmit->head > 0) {
 526                sport->dma_tx_nents = 2;
 527                sg_init_table(sgl, 2);
 528                sg_set_buf(sgl, xmit->buf + xmit->tail,
 529                                UART_XMIT_SIZE - xmit->tail);
 530                sg_set_buf(sgl + 1, xmit->buf, xmit->head);
 531        } else {
 532                sport->dma_tx_nents = 1;
 533                sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
 534        }
 535
 536        ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
 537        if (ret == 0) {
 538                dev_err(dev, "DMA mapping error for TX.\n");
 539                return;
 540        }
 541        desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
 542                                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 543        if (!desc) {
 544                dev_err(dev, "We cannot prepare for the TX slave dma!\n");
 545                return;
 546        }
 547        desc->callback = dma_tx_callback;
 548        desc->callback_param = sport;
 549
 550        dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
 551                        uart_circ_chars_pending(xmit));
 552        /* fire it */
 553        sport->dma_is_txing = 1;
 554        dmaengine_submit(desc);
 555        dma_async_issue_pending(chan);
 556        return;
 557}
 558
/*
 * interrupts disabled on entry
 */
static void imx_start_tx(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long temp;

        if (USE_IRDA(sport)) {
                /* half duplex in IrDA mode; have to disable receive mode */
                temp = readl(sport->port.membase + UCR4);
                temp &= ~(UCR4_DREN);
                writel(temp, sport->port.membase + UCR4);

                temp = readl(sport->port.membase + UCR1);
                temp &= ~(UCR1_RRDYEN);
                writel(temp, sport->port.membase + UCR1);
        }
        /* Clear any pending ORE flag before enabling interrupt */
        temp = readl(sport->port.membase + USR2);
        writel(temp | USR2_ORE, sport->port.membase + USR2);

        /* enable the overrun interrupt */
        temp = readl(sport->port.membase + UCR4);
        temp |= UCR4_OREN;
        writel(temp, sport->port.membase + UCR4);

        /* PIO path: arm the transmitter-empty interrupt */
        if (!sport->dma_is_enabled) {
                temp = readl(sport->port.membase + UCR1);
                writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
        }

        if (USE_IRDA(sport)) {
                /* also arm TX-ready and transmit-complete interrupts */
                temp = readl(sport->port.membase + UCR1);
                temp |= UCR1_TRDYEN;
                writel(temp, sport->port.membase + UCR1);

                temp = readl(sport->port.membase + UCR4);
                temp |= UCR4_TCEN;
                writel(temp, sport->port.membase + UCR4);
        }

        /* DMA path: hand the pending buffer to the DMA engine */
        if (sport->dma_is_enabled) {
                imx_dma_tx(sport);
                return;
        }

        /* PIO path: prime the FIFO now if it is already empty */
        if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
                imx_transmit_buffer(sport);
}
 608
/* RTS-delta interrupt: the RTS input line changed state. */
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        unsigned int val;
        unsigned long flags;

        spin_lock_irqsave(&sport->port.lock, flags);

        /* ack the delta flag, then sample the current RTS level */
        writel(USR1_RTSD, sport->port.membase + USR1);
        val = readl(sport->port.membase + USR1) & USR1_RTSS;
        uart_handle_cts_change(&sport->port, !!val);
        /* wake processes sleeping in TIOCMIWAIT */
        wake_up_interruptible(&sport->port.state->port.delta_msr_wait);

        spin_unlock_irqrestore(&sport->port.lock, flags);
        return IRQ_HANDLED;
}
 625
 626static irqreturn_t imx_txint(int irq, void *dev_id)
 627{
 628        struct imx_port *sport = dev_id;
 629        struct circ_buf *xmit = &sport->port.state->xmit;
 630        unsigned long flags;
 631
 632        spin_lock_irqsave(&sport->port.lock, flags);
 633        if (sport->port.x_char) {
 634                /* Send next char */
 635                writel(sport->port.x_char, sport->port.membase + URTX0);
 636                goto out;
 637        }
 638
 639        if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
 640                imx_stop_tx(&sport->port);
 641                goto out;
 642        }
 643
 644        imx_transmit_buffer(sport);
 645
 646        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 647                uart_write_wakeup(&sport->port);
 648
 649out:
 650        spin_unlock_irqrestore(&sport->port.lock, flags);
 651        return IRQ_HANDLED;
 652}
 653
/*
 * Receiver interrupt (PIO path): drain the RX FIFO into the tty
 * flip buffer, translating per-character error bits into tty flags.
 */
static irqreturn_t imx_rxint(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        unsigned int rx, flg, ignored = 0;
        struct tty_port *port = &sport->port.state->port;
        unsigned long flags, temp;

        spin_lock_irqsave(&sport->port.lock, flags);

        while (readl(sport->port.membase + USR2) & USR2_RDR) {
                flg = TTY_NORMAL;
                sport->port.icount.rx++;

                /* URXD0 holds the character plus its status bits */
                rx = readl(sport->port.membase + URXD0);

                temp = readl(sport->port.membase + USR2);
                if (temp & USR2_BRCD) {
                        /* ack the break condition, let the core handle it */
                        writel(USR2_BRCD, sport->port.membase + USR2);
                        if (uart_handle_break(&sport->port))
                                continue;
                }

                if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
                        continue;

                if (unlikely(rx & URXD_ERR)) {
                        /* account the error type ... */
                        if (rx & URXD_BRK)
                                sport->port.icount.brk++;
                        else if (rx & URXD_PRERR)
                                sport->port.icount.parity++;
                        else if (rx & URXD_FRMERR)
                                sport->port.icount.frame++;
                        if (rx & URXD_OVRRUN)
                                sport->port.icount.overrun++;

                        if (rx & sport->port.ignore_status_mask) {
                                /* bail out if flooded with ignored errors */
                                if (++ignored > 100)
                                        goto out;
                                continue;
                        }

                        rx &= sport->port.read_status_mask;

                        /* ... then map it to the tty flag for this char */
                        if (rx & URXD_BRK)
                                flg = TTY_BREAK;
                        else if (rx & URXD_PRERR)
                                flg = TTY_PARITY;
                        else if (rx & URXD_FRMERR)
                                flg = TTY_FRAME;
                        if (rx & URXD_OVRRUN)
                                flg = TTY_OVERRUN;

#ifdef SUPPORT_SYSRQ
                        sport->port.sysrq = 0;
#endif
                }

                tty_insert_flip_char(port, rx, flg);
        }

out:
        spin_unlock_irqrestore(&sport->port.lock, flags);
        /* push to the line discipline outside the port lock */
        tty_flip_buffer_push(port);
        return IRQ_HANDLED;
}
 719
 720static int start_rx_dma(struct imx_port *sport);
 721/*
 * If the RXFIFO holds data, start a DMA operation to
 * receive it.
 724 */
static void imx_dma_rxint(struct imx_port *sport)
{
        unsigned long temp;

        /* only start a new RX DMA if data is ready and none is in flight */
        temp = readl(sport->port.membase + USR2);
        if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
                sport->dma_is_rxing = 1;

                /* disable the Receiver Ready Interrupt while DMA drains */
                temp = readl(sport->port.membase + UCR1);
                temp &= ~(UCR1_RRDYEN);
                writel(temp, sport->port.membase + UCR1);

                /* tell the DMA to receive the data. */
                start_rx_dma(sport);
        }
}
 742
/* Top-level interrupt handler: demultiplex the USR1/USR2 status bits. */
static irqreturn_t imx_int(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        unsigned int sts;
        unsigned int sts2;

        sts = readl(sport->port.membase + USR1);

        if (sts & USR1_RRDY) {
                /* receiver ready: DMA hand-off or PIO drain */
                if (sport->dma_is_enabled)
                        imx_dma_rxint(sport);
                else
                        imx_rxint(irq, dev_id);
        }

        /* only service TX when the empty interrupt is actually enabled */
        if (sts & USR1_TRDY &&
                        readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
                imx_txint(irq, dev_id);

        if (sts & USR1_RTSD)
                imx_rtsint(irq, dev_id);

        /* ack the async-wake flag */
        if (sts & USR1_AWAKE)
                writel(USR1_AWAKE, sport->port.membase + USR1);

        sts2 = readl(sport->port.membase + USR2);
        if (sts2 & USR2_ORE) {
                /* RX FIFO overrun: count it and clear the flag */
                dev_err(sport->port.dev, "Rx FIFO overrun\n");
                sport->port.icount.overrun++;
                writel(sts2 | USR2_ORE, sport->port.membase + USR2);
        }

        return IRQ_HANDLED;
}
 777
 778/*
 779 * Return TIOCSER_TEMT when transmitter is not busy.
 780 */
 781static unsigned int imx_tx_empty(struct uart_port *port)
 782{
 783        struct imx_port *sport = (struct imx_port *)port;
 784        unsigned int ret;
 785
 786        ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ?  TIOCSER_TEMT : 0;
 787
 788        /* If the TX DMA is working, return 0. */
 789        if (sport->dma_is_enabled && sport->dma_is_txing)
 790                ret = 0;
 791
 792        return ret;
 793}
 794
 795/*
 796 * We have a modem side uart, so the meanings of RTS and CTS are inverted.
 797 */
 798static unsigned int imx_get_mctrl(struct uart_port *port)
 799{
 800        struct imx_port *sport = (struct imx_port *)port;
 801        unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
 802
 803        if (readl(sport->port.membase + USR1) & USR1_RTSS)
 804                tmp |= TIOCM_CTS;
 805
 806        if (readl(sport->port.membase + UCR2) & UCR2_CTS)
 807                tmp |= TIOCM_RTS;
 808
 809        return tmp;
 810}
 811
 812static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
 813{
 814        struct imx_port *sport = (struct imx_port *)port;
 815        unsigned long temp;
 816
 817        temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS;
 818
 819        if (mctrl & TIOCM_RTS)
 820                if (!sport->dma_is_enabled)
 821                        temp |= UCR2_CTS;
 822
 823        writel(temp, sport->port.membase + UCR2);
 824}
 825
 826/*
 827 * Interrupts always disabled.
 828 */
 829static void imx_break_ctl(struct uart_port *port, int break_state)
 830{
 831        struct imx_port *sport = (struct imx_port *)port;
 832        unsigned long flags, temp;
 833
 834        spin_lock_irqsave(&sport->port.lock, flags);
 835
 836        temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
 837
 838        if (break_state != 0)
 839                temp |= UCR1_SNDBRK;
 840
 841        writel(temp, sport->port.membase + UCR1);
 842
 843        spin_unlock_irqrestore(&sport->port.lock, flags);
 844}
 845
 846#define TXTL 2 /* reset default */
 847#define RXTL 1 /* reset default */
 848
 849static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
 850{
 851        unsigned int val;
 852
 853        /* set receiver / transmitter trigger level */
 854        val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
 855        val |= TXTL << UFCR_TXTL_SHF | RXTL;
 856        writel(val, sport->port.membase + UFCR);
 857        return 0;
 858}
 859
 860#define RX_BUF_SIZE     (PAGE_SIZE)
 861static void imx_rx_dma_done(struct imx_port *sport)
 862{
 863        unsigned long temp;
 864
 865        /* Enable this interrupt when the RXFIFO is empty. */
 866        temp = readl(sport->port.membase + UCR1);
 867        temp |= UCR1_RRDYEN;
 868        writel(temp, sport->port.membase + UCR1);
 869
 870        sport->dma_is_rxing = 0;
 871
 872        /* Is the shutdown waiting for us? */
 873        if (waitqueue_active(&sport->dma_wait))
 874                wake_up(&sport->dma_wait);
 875}
 876
 877/*
 878 * There are three kinds of RX DMA interrupts(such as in the MX6Q):
 879 *   [1] the RX DMA buffer is full.
 880 *   [2] the Aging timer expires(wait for 8 bytes long)
 881 *   [3] the Idle Condition Detect(enabled the UCR4_IDDMAEN).
 882 *
 883 * The [2] is trigger when a character was been sitting in the FIFO
 884 * meanwhile [3] can wait for 32 bytes long when the RX line is
 885 * on IDLE state and RxFIFO is empty.
 886 */
 887static void dma_rx_callback(void *data)
 888{
 889        struct imx_port *sport = data;
 890        struct dma_chan *chan = sport->dma_chan_rx;
 891        struct scatterlist *sgl = &sport->rx_sgl;
 892        struct tty_port *port = &sport->port.state->port;
 893        struct dma_tx_state state;
 894        enum dma_status status;
 895        unsigned int count;
 896
 897        /* unmap it first */
 898        dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
 899
 900        status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
 901        count = RX_BUF_SIZE - state.residue;
 902        dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
 903
 904        if (count) {
 905                tty_insert_flip_string(port, sport->rx_buf, count);
 906                tty_flip_buffer_push(port);
 907
 908                start_rx_dma(sport);
 909        } else
 910                imx_rx_dma_done(sport);
 911}
 912
 913static int start_rx_dma(struct imx_port *sport)
 914{
 915        struct scatterlist *sgl = &sport->rx_sgl;
 916        struct dma_chan *chan = sport->dma_chan_rx;
 917        struct device *dev = sport->port.dev;
 918        struct dma_async_tx_descriptor *desc;
 919        int ret;
 920
 921        sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
 922        ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
 923        if (ret == 0) {
 924                dev_err(dev, "DMA mapping error for RX.\n");
 925                return -EINVAL;
 926        }
 927        desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
 928                                        DMA_PREP_INTERRUPT);
 929        if (!desc) {
 930                dev_err(dev, "We cannot prepare for the RX slave dma!\n");
 931                return -EINVAL;
 932        }
 933        desc->callback = dma_rx_callback;
 934        desc->callback_param = sport;
 935
 936        dev_dbg(dev, "RX: prepare for the DMA.\n");
 937        dmaengine_submit(desc);
 938        dma_async_issue_pending(chan);
 939        return 0;
 940}
 941
/*
 * Release the DMA channels and the RX bounce buffer acquired by
 * imx_uart_dma_init(), then mark DMA as uninitialised again.
 * Safe to call on a partially initialised state (NULL checks).
 */
static void imx_uart_dma_exit(struct imx_port *sport)
{
	if (sport->dma_is_enabled)
		imx_disable_dma(sport);

	if (sport->dma_chan_rx) {
		dma_release_channel(sport->dma_chan_rx);
		sport->dma_chan_rx = NULL;

		/* rx_buf is only allocated together with the RX channel */
		kfree(sport->rx_buf);
		sport->rx_buf = NULL;
	}

	if (sport->dma_chan_tx) {
		dma_release_channel(sport->dma_chan_tx);
		sport->dma_chan_tx = NULL;
	}

	sport->dma_is_inited = 0;
}
 959
 960static int imx_uart_dma_init(struct imx_port *sport)
 961{
 962        struct dma_slave_config slave_config = {};
 963        struct device *dev = sport->port.dev;
 964        int ret;
 965
 966        /* Prepare for RX : */
 967        sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
 968        if (!sport->dma_chan_rx) {
 969                dev_dbg(dev, "cannot get the DMA channel.\n");
 970                ret = -EINVAL;
 971                goto err;
 972        }
 973
 974        slave_config.direction = DMA_DEV_TO_MEM;
 975        slave_config.src_addr = sport->port.mapbase + URXD0;
 976        slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 977        slave_config.src_maxburst = RXTL;
 978        ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
 979        if (ret) {
 980                dev_err(dev, "error in RX dma configuration.\n");
 981                goto err;
 982        }
 983
 984        sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 985        if (!sport->rx_buf) {
 986                dev_err(dev, "cannot alloc DMA buffer.\n");
 987                ret = -ENOMEM;
 988                goto err;
 989        }
 990
 991        /* Prepare for TX : */
 992        sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
 993        if (!sport->dma_chan_tx) {
 994                dev_err(dev, "cannot get the TX DMA channel!\n");
 995                ret = -EINVAL;
 996                goto err;
 997        }
 998
 999        slave_config.direction = DMA_MEM_TO_DEV;
1000        slave_config.dst_addr = sport->port.mapbase + URTX0;
1001        slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1002        slave_config.dst_maxburst = TXTL;
1003        ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
1004        if (ret) {
1005                dev_err(dev, "error in TX dma configuration.");
1006                goto err;
1007        }
1008
1009        sport->dma_is_inited = 1;
1010
1011        return 0;
1012err:
1013        imx_uart_dma_exit(sport);
1014        return ret;
1015}
1016
1017static void imx_enable_dma(struct imx_port *sport)
1018{
1019        unsigned long temp;
1020
1021        init_waitqueue_head(&sport->dma_wait);
1022
1023        /* set UCR1 */
1024        temp = readl(sport->port.membase + UCR1);
1025        temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
1026                /* wait for 32 idle frames for IDDMA interrupt */
1027                UCR1_ICD_REG(3);
1028        writel(temp, sport->port.membase + UCR1);
1029
1030        /* set UCR4 */
1031        temp = readl(sport->port.membase + UCR4);
1032        temp |= UCR4_IDDMAEN;
1033        writel(temp, sport->port.membase + UCR4);
1034
1035        sport->dma_is_enabled = 1;
1036}
1037
1038static void imx_disable_dma(struct imx_port *sport)
1039{
1040        unsigned long temp;
1041
1042        /* clear UCR1 */
1043        temp = readl(sport->port.membase + UCR1);
1044        temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
1045        writel(temp, sport->port.membase + UCR1);
1046
1047        /* clear UCR2 */
1048        temp = readl(sport->port.membase + UCR2);
1049        temp &= ~(UCR2_CTSC | UCR2_CTS);
1050        writel(temp, sport->port.membase + UCR2);
1051
1052        /* clear UCR4 */
1053        temp = readl(sport->port.membase + UCR4);
1054        temp &= ~UCR4_IDDMAEN;
1055        writel(temp, sport->port.membase + UCR4);
1056
1057        sport->dma_is_enabled = 0;
1058}
1059
1060/* half the RX buffer size */
1061#define CTSTL 16
1062
1063static int imx_startup(struct uart_port *port)
1064{
1065        struct imx_port *sport = (struct imx_port *)port;
1066        int retval;
1067        unsigned long flags, temp;
1068
1069        retval = clk_prepare_enable(sport->clk_per);
1070        if (retval)
1071                goto error_out1;
1072        retval = clk_prepare_enable(sport->clk_ipg);
1073        if (retval) {
1074                clk_disable_unprepare(sport->clk_per);
1075                goto error_out1;
1076        }
1077
1078        imx_setup_ufcr(sport, 0);
1079
1080        /* disable the DREN bit (Data Ready interrupt enable) before
1081         * requesting IRQs
1082         */
1083        temp = readl(sport->port.membase + UCR4);
1084
1085        if (USE_IRDA(sport))
1086                temp |= UCR4_IRSC;
1087
1088        /* set the trigger level for CTS */
1089        temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
1090        temp |= CTSTL << UCR4_CTSTL_SHF;
1091
1092        writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1093
1094        if (USE_IRDA(sport)) {
1095                /* reset fifo's and state machines */
1096                int i = 100;
1097                temp = readl(sport->port.membase + UCR2);
1098                temp &= ~UCR2_SRST;
1099                writel(temp, sport->port.membase + UCR2);
1100                while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) &&
1101                    (--i > 0)) {
1102                        udelay(1);
1103                }
1104        }
1105
1106        /*
1107         * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
1108         * chips only have one interrupt.
1109         */
1110        if (sport->txirq > 0) {
1111                retval = request_irq(sport->rxirq, imx_rxint, 0,
1112                                DRIVER_NAME, sport);
1113                if (retval)
1114                        goto error_out1;
1115
1116                retval = request_irq(sport->txirq, imx_txint, 0,
1117                                DRIVER_NAME, sport);
1118                if (retval)
1119                        goto error_out2;
1120
1121                /* do not use RTS IRQ on IrDA */
1122                if (!USE_IRDA(sport)) {
1123                        retval = request_irq(sport->rtsirq, imx_rtsint, 0,
1124                                        DRIVER_NAME, sport);
1125                        if (retval)
1126                                goto error_out3;
1127                }
1128        } else {
1129                retval = request_irq(sport->port.irq, imx_int, 0,
1130                                DRIVER_NAME, sport);
1131                if (retval) {
1132                        free_irq(sport->port.irq, sport);
1133                        goto error_out1;
1134                }
1135        }
1136
1137        spin_lock_irqsave(&sport->port.lock, flags);
1138        /*
1139         * Finally, clear and enable interrupts
1140         */
1141        writel(USR1_RTSD, sport->port.membase + USR1);
1142
1143        temp = readl(sport->port.membase + UCR1);
1144        temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1145
1146        if (USE_IRDA(sport)) {
1147                temp |= UCR1_IREN;
1148                temp &= ~(UCR1_RTSDEN);
1149        }
1150
1151        writel(temp, sport->port.membase + UCR1);
1152
1153        temp = readl(sport->port.membase + UCR2);
1154        temp |= (UCR2_RXEN | UCR2_TXEN);
1155        if (!sport->have_rtscts)
1156                temp |= UCR2_IRTS;
1157        writel(temp, sport->port.membase + UCR2);
1158
1159        if (USE_IRDA(sport)) {
1160                /* clear RX-FIFO */
1161                int i = 64;
1162                while ((--i > 0) &&
1163                        (readl(sport->port.membase + URXD0) & URXD_CHARRDY)) {
1164                        barrier();
1165                }
1166        }
1167
1168        if (!is_imx1_uart(sport)) {
1169                temp = readl(sport->port.membase + UCR3);
1170                temp |= IMX21_UCR3_RXDMUXSEL;
1171                writel(temp, sport->port.membase + UCR3);
1172        }
1173
1174        if (USE_IRDA(sport)) {
1175                temp = readl(sport->port.membase + UCR4);
1176                if (sport->irda_inv_rx)
1177                        temp |= UCR4_INVR;
1178                else
1179                        temp &= ~(UCR4_INVR);
1180                writel(temp | UCR4_DREN, sport->port.membase + UCR4);
1181
1182                temp = readl(sport->port.membase + UCR3);
1183                if (sport->irda_inv_tx)
1184                        temp |= UCR3_INVT;
1185                else
1186                        temp &= ~(UCR3_INVT);
1187                writel(temp, sport->port.membase + UCR3);
1188        }
1189
1190        /*
1191         * Enable modem status interrupts
1192         */
1193        imx_enable_ms(&sport->port);
1194        spin_unlock_irqrestore(&sport->port.lock, flags);
1195
1196        if (USE_IRDA(sport)) {
1197                struct imxuart_platform_data *pdata;
1198                pdata = dev_get_platdata(sport->port.dev);
1199                sport->irda_inv_rx = pdata->irda_inv_rx;
1200                sport->irda_inv_tx = pdata->irda_inv_tx;
1201                sport->trcv_delay = pdata->transceiver_delay;
1202                if (pdata->irda_enable)
1203                        pdata->irda_enable(1);
1204        }
1205
1206        return 0;
1207
1208error_out3:
1209        if (sport->txirq)
1210                free_irq(sport->txirq, sport);
1211error_out2:
1212        if (sport->rxirq)
1213                free_irq(sport->rxirq, sport);
1214error_out1:
1215        return retval;
1216}
1217
/*
 * Close the port: wind down DMA, disable the transmitter, free the
 * IRQ(s), mask all interrupts and the UART itself, then gate clocks.
 */
static void imx_shutdown(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;
	unsigned long flags;

	if (sport->dma_is_enabled) {
		/*
		 * We have to wait for the DMA to finish.
		 * NOTE(review): this wait is unbounded -- if a transfer
		 * never completes, shutdown blocks here; confirm the DMA
		 * callbacks always clear dma_is_rxing/dma_is_txing.
		 */
		wait_event(sport->dma_wait,
			!sport->dma_is_rxing && !sport->dma_is_txing);
		imx_stop_rx(port);
		imx_disable_dma(sport);
		imx_uart_dma_exit(sport);
	}

	/* disable the transmitter first */
	spin_lock_irqsave(&sport->port.lock, flags);
	temp = readl(sport->port.membase + UCR2);
	temp &= ~(UCR2_TXEN);
	writel(temp, sport->port.membase + UCR2);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	if (USE_IRDA(sport)) {
		/* power down the IrDA transceiver if the board provides a hook */
		struct imxuart_platform_data *pdata;
		pdata = dev_get_platdata(sport->port.dev);
		if (pdata->irda_enable)
			pdata->irda_enable(0);
	}

	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);

	/*
	 * Free the interrupts
	 */
	if (sport->txirq > 0) {
		if (!USE_IRDA(sport))
			free_irq(sport->rtsirq, sport);
		free_irq(sport->txirq, sport);
		free_irq(sport->rxirq, sport);
	} else
		free_irq(sport->port.irq, sport);

	/*
	 * Disable all interrupts, port and break condition.
	 */

	spin_lock_irqsave(&sport->port.lock, flags);
	temp = readl(sport->port.membase + UCR1);
	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
	if (USE_IRDA(sport))
		temp &= ~(UCR1_IREN);

	writel(temp, sport->port.membase + UCR1);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	clk_disable_unprepare(sport->clk_per);
	clk_disable_unprepare(sport->clk_ipg);
}
1278
1279static void imx_flush_buffer(struct uart_port *port)
1280{
1281        struct imx_port *sport = (struct imx_port *)port;
1282
1283        if (sport->dma_is_enabled) {
1284                sport->tx_bytes = 0;
1285                dmaengine_terminate_all(sport->dma_chan_tx);
1286        }
1287}
1288
/*
 * Apply new termios settings: character size, parity, stop bits, flow
 * control and baud rate.  Called by the serial core with the requested
 * termios and (optionally) the previous one.
 */
static void
imx_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
	unsigned int div, ufcr;
	unsigned long num, denom;
	uint64_t tdiv64;

	/*
	 * If we don't support modem control lines, don't allow
	 * these to be set.
	 *
	 * NOTE(review): dead code -- the condition is hard-coded to 0,
	 * so these flags are never filtered; confirm whether a real
	 * capability check was intended here.
	 */
	if (0) {
		termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
		termios->c_cflag |= CLOCAL;
	}

	/*
	 * We only support CS7 and CS8.
	 * First try the previous size; a second pass falls back to CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
	else
		ucr2 = UCR2_SRST | UCR2_IRTS;

	if (termios->c_cflag & CRTSCTS) {
		if (sport->have_rtscts) {
			/* hardware flow control: CTS controlled by receiver */
			ucr2 &= ~UCR2_IRTS;
			ucr2 |= UCR2_CTSC;

			/* Can we enable the DMA support? */
			if (is_imx6q_uart(sport) && !uart_console(port)
				&& !sport->dma_is_inited)
				imx_uart_dma_init(sport);
		} else {
			termios->c_cflag &= ~CRTSCTS;
		}
	}

	if (termios->c_cflag & CSTOPB)
		ucr2 |= UCR2_STPB;
	if (termios->c_cflag & PARENB) {
		ucr2 |= UCR2_PREN;
		if (termios->c_cflag & PARODD)
			ucr2 |= UCR2_PROE;
	}

	del_timer_sync(&sport->timer);

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	sport->port.read_status_mask = 0;
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |= URXD_BRK;

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |= URXD_PRERR;
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |= URXD_BRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |= URXD_OVRRUN;
	}

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_ucr1 = readl(sport->port.membase + UCR1);
	writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
			sport->port.membase + UCR1);

	/* busy-wait until FIFO and shift register are both empty */
	while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
		barrier();

	/* then, disable everything */
	old_txrxen = readl(sport->port.membase + UCR2);
	writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
			sport->port.membase + UCR2);
	/* remember only the enable bits so they can be restored below */
	old_txrxen &= (UCR2_TXEN | UCR2_RXEN);

	if (USE_IRDA(sport)) {
		/*
		 * use maximum available submodule frequency to
		 * avoid missing short pulses due to low sampling rate
		 */
		div = 1;
	} else {
		/* custom-baudrate handling */
		div = sport->port.uartclk / (baud * 16);
		if (baud == 38400 && quot != div)
			baud = sport->port.uartclk / (quot * 16);

		div = sport->port.uartclk / (baud * 16);
		if (div > 7)
			div = 7;
		if (!div)
			div = 1;
	}

	/* find UBIR/UBMR values best approximating the requested rate */
	rational_best_approximation(16 * div * baud, sport->port.uartclk,
		1 << 16, 1 << 16, &num, &denom);

	/* report the actually configured rate back to the tty layer */
	tdiv64 = sport->port.uartclk;
	tdiv64 *= num;
	do_div(tdiv64, denom * 16 * div);
	tty_termios_encode_baud_rate(termios,
				(speed_t)tdiv64, (speed_t)tdiv64);

	/* UBIR/UBMR registers hold the values minus one */
	num -= 1;
	denom -= 1;

	ufcr = readl(sport->port.membase + UFCR);
	ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
	if (sport->dte_mode)
		ufcr |= UFCR_DCEDTE;
	writel(ufcr, sport->port.membase + UFCR);

	writel(num, sport->port.membase + UBIR);
	writel(denom, sport->port.membase + UBMR);

	if (!is_imx1_uart(sport))
		writel(sport->port.uartclk / div / 1000,
				sport->port.membase + IMX21_ONEMS);

	/* re-enable the interrupts that were active on entry */
	writel(old_ucr1, sport->port.membase + UCR1);

	/* set the parity, stop bits and data size */
	writel(ucr2 | old_txrxen, sport->port.membase + UCR2);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		imx_enable_ms(&sport->port);

	if (sport->dma_is_inited && !sport->dma_is_enabled)
		imx_enable_dma(sport);
	spin_unlock_irqrestore(&sport->port.lock, flags);
}
1456
1457static const char *imx_type(struct uart_port *port)
1458{
1459        struct imx_port *sport = (struct imx_port *)port;
1460
1461        return sport->port.type == PORT_IMX ? "IMX" : NULL;
1462}
1463
1464/*
1465 * Release the memory region(s) being used by 'port'.
1466 */
1467static void imx_release_port(struct uart_port *port)
1468{
1469        struct platform_device *pdev = to_platform_device(port->dev);
1470        struct resource *mmres;
1471
1472        mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1473        release_mem_region(mmres->start, resource_size(mmres));
1474}
1475
1476/*
1477 * Request the memory region(s) being used by 'port'.
1478 */
1479static int imx_request_port(struct uart_port *port)
1480{
1481        struct platform_device *pdev = to_platform_device(port->dev);
1482        struct resource *mmres;
1483        void *ret;
1484
1485        mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1486        if (!mmres)
1487                return -ENODEV;
1488
1489        ret = request_mem_region(mmres->start, resource_size(mmres), "imx-uart");
1490
1491        return  ret ? 0 : -EBUSY;
1492}
1493
1494/*
1495 * Configure/autoconfigure the port.
1496 */
1497static void imx_config_port(struct uart_port *port, int flags)
1498{
1499        struct imx_port *sport = (struct imx_port *)port;
1500
1501        if (flags & UART_CONFIG_TYPE &&
1502            imx_request_port(&sport->port) == 0)
1503                sport->port.type = PORT_IMX;
1504}
1505
1506/*
1507 * Verify the new serial_struct (for TIOCSSERIAL).
1508 * The only change we allow are to the flags and type, and
1509 * even then only between PORT_IMX and PORT_UNKNOWN
1510 */
1511static int
1512imx_verify_port(struct uart_port *port, struct serial_struct *ser)
1513{
1514        struct imx_port *sport = (struct imx_port *)port;
1515        int ret = 0;
1516
1517        if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
1518                ret = -EINVAL;
1519        if (sport->port.irq != ser->irq)
1520                ret = -EINVAL;
1521        if (ser->io_type != UPIO_MEM)
1522                ret = -EINVAL;
1523        if (sport->port.uartclk / 16 != ser->baud_base)
1524                ret = -EINVAL;
1525        if (sport->port.mapbase != (unsigned long)ser->iomem_base)
1526                ret = -EINVAL;
1527        if (sport->port.iobase != ser->port)
1528                ret = -EINVAL;
1529        if (ser->hub6 != 0)
1530                ret = -EINVAL;
1531        return ret;
1532}
1533
1534#if defined(CONFIG_CONSOLE_POLL)
/*
 * Console-poll read: mask interrupt sources, busy-wait for one received
 * character, then restore the saved control registers.
 */
static int imx_poll_get_char(struct uart_port *port)
{
	struct imx_port_ucrs old_ucr;
	unsigned int status;
	unsigned char c;

	/* save control registers */
	imx_port_ucrs_save(port, &old_ucr);

	/* disable interrupts */
	writel(UCR1_UARTEN, port->membase + UCR1);
	writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
	       port->membase + UCR2);
	writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
	       port->membase + UCR3);

	/* poll until USR2_RDR says a character is ready */
	do {
		status = readl(port->membase + USR2);
	} while (~status & USR2_RDR);

	/* read (pops one character from the RX FIFO) */
	c = readl(port->membase + URXD0);

	/* restore control registers */
	imx_port_ucrs_restore(port, &old_ucr);

	return c;
}
1564
/*
 * Console-poll write: mask interrupt sources, wait for TX readiness,
 * emit one character, wait for it to drain, then restore registers.
 */
static void imx_poll_put_char(struct uart_port *port, unsigned char c)
{
	struct imx_port_ucrs old_ucr;
	unsigned int status;

	/* save control registers */
	imx_port_ucrs_save(port, &old_ucr);

	/* disable interrupts */
	writel(UCR1_UARTEN, port->membase + UCR1);
	writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
	       port->membase + UCR2);
	writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
	       port->membase + UCR3);

	/* drain: wait until the transmitter can accept a character */
	do {
		status = readl(port->membase + USR1);
	} while (~status & USR1_TRDY);

	/* write */
	writel(c, port->membase + URTX0);

	/* flush: wait until the transmit path is completely empty */
	do {
		status = readl(port->membase + USR2);
	} while (~status & USR2_TXDC);

	/* restore control registers */
	imx_port_ucrs_restore(port, &old_ucr);
}
1596#endif
1597
/* uart_ops vector handed to the serial core for this driver's ports. */
static struct uart_ops imx_pops = {
	.tx_empty	= imx_tx_empty,
	.set_mctrl	= imx_set_mctrl,
	.get_mctrl	= imx_get_mctrl,
	.stop_tx	= imx_stop_tx,
	.start_tx	= imx_start_tx,
	.stop_rx	= imx_stop_rx,
	.enable_ms	= imx_enable_ms,
	.break_ctl	= imx_break_ctl,
	.startup	= imx_startup,
	.shutdown	= imx_shutdown,
	.flush_buffer	= imx_flush_buffer,
	.set_termios	= imx_set_termios,
	.type		= imx_type,
	.release_port	= imx_release_port,
	.request_port	= imx_request_port,
	.config_port	= imx_config_port,
	.verify_port	= imx_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_get_char  = imx_poll_get_char,
	.poll_put_char  = imx_poll_put_char,
#endif
};
1621
1622static struct imx_port *imx_ports[UART_NR];
1623
1624#ifdef CONFIG_SERIAL_IMX_CONSOLE
/* Emit one console character: spin until the TX FIFO has room. */
static void imx_console_putchar(struct uart_port *port, int ch)
{
	struct imx_port *sport = (struct imx_port *)port;

	while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
		barrier();

	writel(ch, sport->port.membase + URTX0);
}
1634
1635/*
1636 * Interrupts are disabled on entering
1637 */
1638static void
1639imx_console_write(struct console *co, const char *s, unsigned int count)
1640{
1641        struct imx_port *sport = imx_ports[co->index];
1642        struct imx_port_ucrs old_ucr;
1643        unsigned int ucr1;
1644        unsigned long flags = 0;
1645        int locked = 1;
1646        int retval;
1647
1648        retval = clk_enable(sport->clk_per);
1649        if (retval)
1650                return;
1651        retval = clk_enable(sport->clk_ipg);
1652        if (retval) {
1653                clk_disable(sport->clk_per);
1654                return;
1655        }
1656
1657        if (sport->port.sysrq)
1658                locked = 0;
1659        else if (oops_in_progress)
1660                locked = spin_trylock_irqsave(&sport->port.lock, flags);
1661        else
1662                spin_lock_irqsave(&sport->port.lock, flags);
1663
1664        /*
1665         *      First, save UCR1/2/3 and then disable interrupts
1666         */
1667        imx_port_ucrs_save(&sport->port, &old_ucr);
1668        ucr1 = old_ucr.ucr1;
1669
1670        if (is_imx1_uart(sport))
1671                ucr1 |= IMX1_UCR1_UARTCLKEN;
1672        ucr1 |= UCR1_UARTEN;
1673        ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
1674
1675        writel(ucr1, sport->port.membase + UCR1);
1676
1677        writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
1678
1679        uart_console_write(&sport->port, s, count, imx_console_putchar);
1680
1681        /*
1682         *      Finally, wait for transmitter to become empty
1683         *      and restore UCR1/2/3
1684         */
1685        while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
1686
1687        imx_port_ucrs_restore(&sport->port, &old_ucr);
1688
1689        if (locked)
1690                spin_unlock_irqrestore(&sport->port.lock, flags);
1691
1692        clk_disable(sport->clk_ipg);
1693        clk_disable(sport->clk_per);
1694}
1695
1696/*
1697 * If the port was already initialised (eg, by a boot loader),
1698 * try to determine the current setup.
1699 */
1700static void __init
1701imx_console_get_options(struct imx_port *sport, int *baud,
1702                           int *parity, int *bits)
1703{
1704
1705        if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
1706                /* ok, the port was enabled */
1707                unsigned int ucr2, ubir, ubmr, uartclk;
1708                unsigned int baud_raw;
1709                unsigned int ucfr_rfdiv;
1710
1711                ucr2 = readl(sport->port.membase + UCR2);
1712
1713                *parity = 'n';
1714                if (ucr2 & UCR2_PREN) {
1715                        if (ucr2 & UCR2_PROE)
1716                                *parity = 'o';
1717                        else
1718                                *parity = 'e';
1719                }
1720
1721                if (ucr2 & UCR2_WS)
1722                        *bits = 8;
1723                else
1724                        *bits = 7;
1725
1726                ubir = readl(sport->port.membase + UBIR) & 0xffff;
1727                ubmr = readl(sport->port.membase + UBMR) & 0xffff;
1728
1729                ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
1730                if (ucfr_rfdiv == 6)
1731                        ucfr_rfdiv = 7;
1732                else
1733                        ucfr_rfdiv = 6 - ucfr_rfdiv;
1734
1735                uartclk = clk_get_rate(sport->clk_per);
1736                uartclk /= ucfr_rfdiv;
1737
1738                {       /*
1739                         * The next code provides exact computation of
1740                         *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
1741                         * without need of float support or long long division,
1742                         * which would be required to prevent 32bit arithmetic overflow
1743                         */
1744                        unsigned int mul = ubir + 1;
1745                        unsigned int div = 16 * (ubmr + 1);
1746                        unsigned int rem = uartclk % div;
1747
1748                        baud_raw = (uartclk / div) * mul;
1749                        baud_raw += (rem * mul + div / 2) / div;
1750                        *baud = (baud_raw + 50) / 100 * 100;
1751                }
1752
1753                if (*baud != baud_raw)
1754                        pr_info("Console IMX rounded baud rate from %d to %d\n",
1755                                baud_raw, *baud);
1756        }
1757}
1758
1759static int __init
1760imx_console_setup(struct console *co, char *options)
1761{
1762        struct imx_port *sport;
1763        int baud = 9600;
1764        int bits = 8;
1765        int parity = 'n';
1766        int flow = 'n';
1767        int retval;
1768
1769        /*
1770         * Check whether an invalid uart number has been specified, and
1771         * if so, search for the first available port that does have
1772         * console support.
1773         */
1774        if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
1775                co->index = 0;
1776        sport = imx_ports[co->index];
1777        if (sport == NULL)
1778                return -ENODEV;
1779
1780        /* For setting the registers, we only need to enable the ipg clock. */
1781        retval = clk_prepare_enable(sport->clk_ipg);
1782        if (retval)
1783                goto error_console;
1784
1785        if (options)
1786                uart_parse_options(options, &baud, &parity, &bits, &flow);
1787        else
1788                imx_console_get_options(sport, &baud, &parity, &bits);
1789
1790        imx_setup_ufcr(sport, 0);
1791
1792        retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
1793
1794        clk_disable(sport->clk_ipg);
1795        if (retval) {
1796                clk_unprepare(sport->clk_ipg);
1797                goto error_console;
1798        }
1799
1800        retval = clk_prepare(sport->clk_per);
1801        if (retval)
1802                clk_disable_unprepare(sport->clk_ipg);
1803
1804error_console:
1805        return retval;
1806}
1807
/* Forward declaration: the console must reference the uart driver below. */
static struct uart_driver imx_reg;
static struct console imx_console = {
	.name		= DEV_NAME,
	.write		= imx_console_write,
	.device		= uart_console_device,
	.setup		= imx_console_setup,
	.flags		= CON_PRINTBUFFER,	/* replay the log buffer on registration */
	.index		= -1,			/* pick port from console= or default to 0 */
	.data		= &imx_reg,
};
1818
1819#define IMX_CONSOLE     &imx_console
1820#else
1821#define IMX_CONSOLE     NULL
1822#endif
1823
/* tty/serial core driver instance covering all i.MX UART ports. */
static struct uart_driver imx_reg = {
	.owner          = THIS_MODULE,
	.driver_name    = DRIVER_NAME,
	.dev_name       = DEV_NAME,
	.major          = SERIAL_IMX_MAJOR,
	.minor          = MINOR_START,
	.nr             = ARRAY_SIZE(imx_ports),	/* maximum number of ports */
	.cons           = IMX_CONSOLE,	/* NULL when console support is configured out */
};
1833
1834static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
1835{
1836        struct imx_port *sport = platform_get_drvdata(dev);
1837        unsigned int val;
1838
1839        /* enable wakeup from i.MX UART */
1840        val = readl(sport->port.membase + UCR3);
1841        val |= UCR3_AWAKEN;
1842        writel(val, sport->port.membase + UCR3);
1843
1844        uart_suspend_port(&imx_reg, &sport->port);
1845
1846        return 0;
1847}
1848
1849static int serial_imx_resume(struct platform_device *dev)
1850{
1851        struct imx_port *sport = platform_get_drvdata(dev);
1852        unsigned int val;
1853
1854        /* disable wakeup from i.MX UART */
1855        val = readl(sport->port.membase + UCR3);
1856        val &= ~UCR3_AWAKEN;
1857        writel(val, sport->port.membase + UCR3);
1858
1859        uart_resume_port(&imx_reg, &sport->port);
1860
1861        return 0;
1862}
1863
1864#ifdef CONFIG_OF
1865/*
1866 * This function returns 1 iff pdev isn't a device instatiated by dt, 0 iff it
1867 * could successfully get all information from dt or a negative errno.
1868 */
1869static int serial_imx_probe_dt(struct imx_port *sport,
1870                struct platform_device *pdev)
1871{
1872        struct device_node *np = pdev->dev.of_node;
1873        const struct of_device_id *of_id =
1874                        of_match_device(imx_uart_dt_ids, &pdev->dev);
1875        int ret;
1876
1877        if (!np)
1878                /* no device tree device */
1879                return 1;
1880
1881        ret = of_alias_get_id(np, "serial");
1882        if (ret < 0) {
1883                dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
1884                return ret;
1885        }
1886        sport->port.line = ret;
1887
1888        if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
1889                sport->have_rtscts = 1;
1890
1891        if (of_get_property(np, "fsl,irda-mode", NULL))
1892                sport->use_irda = 1;
1893
1894        if (of_get_property(np, "fsl,dte-mode", NULL))
1895                sport->dte_mode = 1;
1896
1897        sport->devdata = of_id->data;
1898
1899        return 0;
1900}
1901#else
/* Without CONFIG_OF there is never a dt device: always fall back to pdata. */
static inline int serial_imx_probe_dt(struct imx_port *sport,
		struct platform_device *pdev)
{
	return 1;
}
1907#endif
1908
1909static void serial_imx_probe_pdata(struct imx_port *sport,
1910                struct platform_device *pdev)
1911{
1912        struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
1913
1914        sport->port.line = pdev->id;
1915        sport->devdata = (struct imx_uart_data  *) pdev->id_entry->driver_data;
1916
1917        if (!pdata)
1918                return;
1919
1920        if (pdata->flags & IMXUART_HAVE_RTSCTS)
1921                sport->have_rtscts = 1;
1922
1923        if (pdata->flags & IMXUART_IRDA)
1924                sport->use_irda = 1;
1925}
1926
1927static int serial_imx_probe(struct platform_device *pdev)
1928{
1929        struct imx_port *sport;
1930        struct imxuart_platform_data *pdata;
1931        void __iomem *base;
1932        int ret = 0;
1933        struct resource *res;
1934
1935        sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
1936        if (!sport)
1937                return -ENOMEM;
1938
1939        ret = serial_imx_probe_dt(sport, pdev);
1940        if (ret > 0)
1941                serial_imx_probe_pdata(sport, pdev);
1942        else if (ret < 0)
1943                return ret;
1944
1945        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1946        if (!res)
1947                return -ENODEV;
1948
1949        base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
1950        if (!base)
1951                return -ENOMEM;
1952
1953        sport->port.dev = &pdev->dev;
1954        sport->port.mapbase = res->start;
1955        sport->port.membase = base;
1956        sport->port.type = PORT_IMX,
1957        sport->port.iotype = UPIO_MEM;
1958        sport->port.irq = platform_get_irq(pdev, 0);
1959        sport->rxirq = platform_get_irq(pdev, 0);
1960        sport->txirq = platform_get_irq(pdev, 1);
1961        sport->rtsirq = platform_get_irq(pdev, 2);
1962        sport->port.fifosize = 32;
1963        sport->port.ops = &imx_pops;
1964        sport->port.flags = UPF_BOOT_AUTOCONF;
1965        init_timer(&sport->timer);
1966        sport->timer.function = imx_timeout;
1967        sport->timer.data     = (unsigned long)sport;
1968
1969        sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1970        if (IS_ERR(sport->clk_ipg)) {
1971                ret = PTR_ERR(sport->clk_ipg);
1972                dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
1973                return ret;
1974        }
1975
1976        sport->clk_per = devm_clk_get(&pdev->dev, "per");
1977        if (IS_ERR(sport->clk_per)) {
1978                ret = PTR_ERR(sport->clk_per);
1979                dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
1980                return ret;
1981        }
1982
1983        sport->port.uartclk = clk_get_rate(sport->clk_per);
1984
1985        imx_ports[sport->port.line] = sport;
1986
1987        pdata = dev_get_platdata(&pdev->dev);
1988        if (pdata && pdata->init) {
1989                ret = pdata->init(pdev);
1990                if (ret)
1991                        return ret;
1992        }
1993
1994        ret = uart_add_one_port(&imx_reg, &sport->port);
1995        if (ret)
1996                goto deinit;
1997        platform_set_drvdata(pdev, sport);
1998
1999        return 0;
2000deinit:
2001        if (pdata && pdata->exit)
2002                pdata->exit(pdev);
2003        return ret;
2004}
2005
2006static int serial_imx_remove(struct platform_device *pdev)
2007{
2008        struct imxuart_platform_data *pdata;
2009        struct imx_port *sport = platform_get_drvdata(pdev);
2010
2011        pdata = dev_get_platdata(&pdev->dev);
2012
2013        uart_remove_one_port(&imx_reg, &sport->port);
2014
2015        if (pdata && pdata->exit)
2016                pdata->exit(pdev);
2017
2018        return 0;
2019}
2020
static struct platform_driver serial_imx_driver = {
	.probe		= serial_imx_probe,
	.remove		= serial_imx_remove,

	/* legacy (non-dev_pm_ops) suspend/resume callbacks */
	.suspend	= serial_imx_suspend,
	.resume		= serial_imx_resume,
	.id_table	= imx_uart_devtype,	/* non-dt platform device matching */
	.driver		= {
		.name	= "imx-uart",
		.owner	= THIS_MODULE,
		.of_match_table = imx_uart_dt_ids,	/* dt matching */
	},
};
2034
2035static int __init imx_serial_init(void)
2036{
2037        int ret;
2038
2039        pr_info("Serial: IMX driver\n");
2040
2041        ret = uart_register_driver(&imx_reg);
2042        if (ret)
2043                return ret;
2044
2045        ret = platform_driver_register(&serial_imx_driver);
2046        if (ret != 0)
2047                uart_unregister_driver(&imx_reg);
2048
2049        return ret;
2050}
2051
/* Module exit: tear down in reverse order of imx_serial_init(). */
static void __exit imx_serial_exit(void)
{
	platform_driver_unregister(&serial_imx_driver);
	uart_unregister_driver(&imx_reg);
}
2057
2058module_init(imx_serial_init);
2059module_exit(imx_serial_exit);
2060
2061MODULE_AUTHOR("Sascha Hauer");
2062MODULE_DESCRIPTION("IMX generic serial port driver");
2063MODULE_LICENSE("GPL");
2064MODULE_ALIAS("platform:imx-uart");
2065