linux/arch/cris/arch-v32/drivers/sync_serial.c
   1/*
   2 * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
   3 *
   4 * Copyright (c) 2005, 2008 Axis Communications AB
   5 * Author: Mikael Starvik
   6 *
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/kernel.h>
  11#include <linux/types.h>
  12#include <linux/errno.h>
  13#include <linux/major.h>
  14#include <linux/sched.h>
  15#include <linux/mutex.h>
  16#include <linux/interrupt.h>
  17#include <linux/poll.h>
  18#include <linux/fs.h>
  19#include <linux/cdev.h>
  20#include <linux/device.h>
  21#include <linux/wait.h>
  22
  23#include <asm/io.h>
  24#include <mach/dma.h>
  25#include <pinmux.h>
  26#include <hwregs/reg_rdwr.h>
  27#include <hwregs/sser_defs.h>
  28#include <hwregs/timer_defs.h>
  29#include <hwregs/dma_defs.h>
  30#include <hwregs/dma.h>
  31#include <hwregs/intr_vect_defs.h>
  32#include <hwregs/intr_vect.h>
  33#include <hwregs/reg_map.h>
  34#include <asm/sync_serial.h>
  35
  36
  37/* The receiver is a bit tricky because of the continuous stream of data.*/
  38/*                                                                       */
  39/* Three DMA descriptors are linked together. Each DMA descriptor is     */
  40/* responsible for port->bufchunk of a common buffer.                    */
  41/*                                                                       */
  42/* +---------------------------------------------+                       */
  43/* |   +----------+   +----------+   +----------+ |                      */
  44/* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+                      */
  45/*     +----------+   +----------+   +----------+                        */
  46/*         |            |              |                                 */
  47/*         v            v              v                                 */
  48/*   +-------------------------------------+                             */
  49/*   |        BUFFER                       |                             */
  50/*   +-------------------------------------+                             */
  51/*      |<- data_avail ->|                                               */
  52/*    readp          writep                                              */
  53/*                                                                       */
  54/* If the application keeps up the pace readp will be right after writep.*/
  55/* If the application can't keep the pace we have to throw away data.    */
  56/* The idea is that readp should be ready with the data pointed out by   */
  57/* Descr[i] when the DMA has filled in Descr[i+1].                       */
   58/* Otherwise we will discard the rest of the data pointed out by       */
   59/* Descr[i] and set readp to the start of Descr[i+1].                  */
   60/*                                                                     */
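/*
 * Illustrative sketch (not part of the driver): how __sync_serial_read()
 * below derives the number of readable bytes from readp/writep.  Reads are
 * "lazy" and never return wrapped data, so at most the bytes up to the end
 * of the flip buffer are returned per call.  The empty case (readp == writep
 * with nothing buffered) is excluded by the caller, which blocks on
 * in_wait_q first.
 */
#if 0	/* example only, never compiled */
static int readable_bytes_no_wrap(struct sync_port *port)
{
        unsigned char *start = port->readp;  /* next byte for the application */
        unsigned char *end = port->writep;   /* next byte written by the DMA */

        if (end > start)                     /* data is contiguous */
                return end - start;
        /* writep has wrapped (or the buffer is full): return only the tail */
        return port->flip + port->in_buffer_size - start;
}
#endif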
  61
  62/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
  63/* words can be handled */
  64#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
  65#define NBR_IN_DESCR (8*6)
  66#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
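/*
 * A minimal sketch (not in the original driver): the 24 bit constraint
 * above could also be checked at build time, e.g. with
 * BUILD_BUG_ON(IN_BUFFER_SIZE % 6 != 0) from the init code.  Since
 * NBR_IN_DESCR is 8*6, the product is a multiple of 6 for any
 * SSP_INPUT_CHUNK_SIZE.
 */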
  67
  68#define NBR_OUT_DESCR 8
  69#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
  70
  71#define DEFAULT_FRAME_RATE 0
  72#define DEFAULT_WORD_RATE 7
  73
  74/* To be removed when we move to pure udev. */
  75#define SYNC_SERIAL_MAJOR 125
  76
  77/* NOTE: Enabling some debug will likely cause overrun or underrun,
  78 * especially if manual mode is used.
  79 */
  80#define DEBUG(x)
  81#define DEBUGREAD(x)
  82#define DEBUGWRITE(x)
  83#define DEBUGPOLL(x)
  84#define DEBUGRXINT(x)
  85#define DEBUGTXINT(x)
  86#define DEBUGTRDMA(x)
  87#define DEBUGOUTBUF(x)
  88
  89enum syncser_irq_setup {
  90        no_irq_setup = 0,
  91        dma_irq_setup = 1,
  92        manual_irq_setup = 2,
  93};
  94
  95struct sync_port {
  96        unsigned long regi_sser;
  97        unsigned long regi_dmain;
  98        unsigned long regi_dmaout;
  99
 100        /* Interrupt vectors. */
 101        unsigned long dma_in_intr_vect; /* Used for DMA in. */
 102        unsigned long dma_out_intr_vect; /* Used for DMA out. */
 103        unsigned long syncser_intr_vect; /* Used when no DMA. */
 104
 105        /* DMA number for in and out. */
 106        unsigned int dma_in_nbr;
 107        unsigned int dma_out_nbr;
 108
 109        /* DMA owner. */
 110        enum dma_owner req_dma;
 111
 112        char started; /* 1 if port has been started */
 113        char port_nbr; /* Port 0 or 1 */
 114        char busy; /* 1 if port is busy */
 115
 116        char enabled;  /* 1 if port is enabled */
 117        char use_dma;  /* 1 if port uses dma */
 118        char tr_running;
 119
 120        enum syncser_irq_setup init_irqs;
 121        int output;
 122        int input;
 123
 124        /* Next byte to be read by application */
 125        unsigned char *readp;
 126        /* Next byte to be written by etrax */
 127        unsigned char *writep;
 128
 129        unsigned int in_buffer_size;
 130        unsigned int in_buffer_len;
 131        unsigned int inbufchunk;
  132        /* Data buffers for input and output. */
 133        unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
 134        unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
 135        unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
 136        struct timespec timestamp[NBR_IN_DESCR];
 137        struct dma_descr_data *next_rx_desc;
 138        struct dma_descr_data *prev_rx_desc;
 139
 140        struct timeval last_timestamp;
 141        int read_ts_idx;
 142        int write_ts_idx;
 143
 144        /* Pointer to the first available descriptor in the ring,
 145         * unless active_tr_descr == catch_tr_descr and a dma
 146         * transfer is active */
 147        struct dma_descr_data *active_tr_descr;
 148
 149        /* Pointer to the first allocated descriptor in the ring */
 150        struct dma_descr_data *catch_tr_descr;
 151
 152        /* Pointer to the descriptor with the current end-of-list */
 153        struct dma_descr_data *prev_tr_descr;
 154        int full;
 155
 156        /* Pointer to the first byte being read by DMA
 157         * or current position in out_buffer if not using DMA. */
 158        unsigned char *out_rd_ptr;
 159
 160        /* Number of bytes currently locked for being read by DMA */
 161        int out_buf_count;
 162
 163        dma_descr_context in_context __aligned(32);
 164        dma_descr_context out_context __aligned(32);
 165        dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
 166        dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
 167
 168        wait_queue_head_t out_wait_q;
 169        wait_queue_head_t in_wait_q;
 170
 171        spinlock_t lock;
 172};
 173
 174static DEFINE_MUTEX(sync_serial_mutex);
 175static int etrax_sync_serial_init(void);
 176static void initialize_port(int portnbr);
 177static inline int sync_data_avail(struct sync_port *port);
 178
 179static int sync_serial_open(struct inode *, struct file *);
 180static int sync_serial_release(struct inode *, struct file *);
 181static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
 182
 183static long sync_serial_ioctl(struct file *file,
 184                              unsigned int cmd, unsigned long arg);
 185static int sync_serial_ioctl_unlocked(struct file *file,
 186                                      unsigned int cmd, unsigned long arg);
 187static ssize_t sync_serial_write(struct file *file, const char __user *buf,
 188                                 size_t count, loff_t *ppos);
 189static ssize_t sync_serial_read(struct file *file, char __user *buf,
 190                                size_t count, loff_t *ppos);
 191
 192#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
 193        defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
 194        (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
 195        defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
 196#define SYNC_SER_DMA
 197#else
 198#define SYNC_SER_MANUAL
 199#endif
 200
 201#ifdef SYNC_SER_DMA
 202static void start_dma_out(struct sync_port *port, const char *data, int count);
 203static void start_dma_in(struct sync_port *port);
 204static irqreturn_t tr_interrupt(int irq, void *dev_id);
 205static irqreturn_t rx_interrupt(int irq, void *dev_id);
 206#endif
 207#ifdef SYNC_SER_MANUAL
 208static void send_word(struct sync_port *port);
 209static irqreturn_t manual_interrupt(int irq, void *dev_id);
 210#endif
 211
 212#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
 213#define artpec_request_dma crisv32_request_dma
 214#define artpec_free_dma crisv32_free_dma
 215
 216#ifdef CONFIG_ETRAXFS
 217/* ETRAX FS */
 218#define DMA_OUT_NBR0            SYNC_SER0_TX_DMA_NBR
 219#define DMA_IN_NBR0             SYNC_SER0_RX_DMA_NBR
 220#define DMA_OUT_NBR1            SYNC_SER1_TX_DMA_NBR
 221#define DMA_IN_NBR1             SYNC_SER1_RX_DMA_NBR
 222#define PINMUX_SSER0            pinmux_sser0
 223#define PINMUX_SSER1            pinmux_sser1
 224#define SYNCSER_INST0           regi_sser0
 225#define SYNCSER_INST1           regi_sser1
 226#define SYNCSER_INTR_VECT0      SSER0_INTR_VECT
 227#define SYNCSER_INTR_VECT1      SSER1_INTR_VECT
 228#define OUT_DMA_INST0           regi_dma4
 229#define IN_DMA_INST0            regi_dma5
 230#define DMA_OUT_INTR_VECT0      DMA4_INTR_VECT
 231#define DMA_OUT_INTR_VECT1      DMA7_INTR_VECT
 232#define DMA_IN_INTR_VECT0       DMA5_INTR_VECT
 233#define DMA_IN_INTR_VECT1       DMA6_INTR_VECT
 234#define REQ_DMA_SYNCSER0        dma_sser0
 235#define REQ_DMA_SYNCSER1        dma_sser1
 236#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
 237#define PORT1_DMA 1
 238#else
 239#define PORT1_DMA 0
 240#endif
 241#elif defined(CONFIG_CRIS_MACH_ARTPEC3)
 242/* ARTPEC-3 */
 243#define DMA_OUT_NBR0            SYNC_SER_TX_DMA_NBR
 244#define DMA_IN_NBR0             SYNC_SER_RX_DMA_NBR
 245#define PINMUX_SSER0            pinmux_sser
 246#define SYNCSER_INST0           regi_sser
 247#define SYNCSER_INTR_VECT0      SSER_INTR_VECT
 248#define OUT_DMA_INST0           regi_dma6
 249#define IN_DMA_INST0            regi_dma7
 250#define DMA_OUT_INTR_VECT0      DMA6_INTR_VECT
 251#define DMA_IN_INTR_VECT0       DMA7_INTR_VECT
 252#define REQ_DMA_SYNCSER0        dma_sser
 253#define REQ_DMA_SYNCSER1        dma_sser
 254#endif
 255
 256#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
 257#define PORT0_DMA 1
 258#else
 259#define PORT0_DMA 0
 260#endif
 261
 262/* The ports */
 263static struct sync_port ports[] = {
 264        {
 265                .regi_sser              = SYNCSER_INST0,
 266                .regi_dmaout            = OUT_DMA_INST0,
 267                .regi_dmain             = IN_DMA_INST0,
 268                .use_dma                = PORT0_DMA,
 269                .dma_in_intr_vect       = DMA_IN_INTR_VECT0,
 270                .dma_out_intr_vect      = DMA_OUT_INTR_VECT0,
 271                .dma_in_nbr             = DMA_IN_NBR0,
 272                .dma_out_nbr            = DMA_OUT_NBR0,
 273                .req_dma                = REQ_DMA_SYNCSER0,
 274                .syncser_intr_vect      = SYNCSER_INTR_VECT0,
 275        },
 276#ifdef CONFIG_ETRAXFS
 277        {
 278                .regi_sser              = SYNCSER_INST1,
 279                .regi_dmaout            = regi_dma6,
 280                .regi_dmain             = regi_dma7,
 281                .use_dma                = PORT1_DMA,
 282                .dma_in_intr_vect       = DMA_IN_INTR_VECT1,
 283                .dma_out_intr_vect      = DMA_OUT_INTR_VECT1,
 284                .dma_in_nbr             = DMA_IN_NBR1,
 285                .dma_out_nbr            = DMA_OUT_NBR1,
 286                .req_dma                = REQ_DMA_SYNCSER1,
 287                .syncser_intr_vect      = SYNCSER_INTR_VECT1,
 288        },
 289#endif
 290};
 291
 292#define NBR_PORTS ARRAY_SIZE(ports)
 293
 294static const struct file_operations syncser_fops = {
 295        .owner          = THIS_MODULE,
 296        .write          = sync_serial_write,
 297        .read           = sync_serial_read,
 298        .poll           = sync_serial_poll,
 299        .unlocked_ioctl = sync_serial_ioctl,
 300        .open           = sync_serial_open,
 301        .release        = sync_serial_release,
 302        .llseek         = noop_llseek,
 303};
 304
 305static dev_t syncser_first;
 306static int minor_count = NBR_PORTS;
 307#define SYNCSER_NAME "syncser"
 308static struct cdev *syncser_cdev;
 309static struct class *syncser_class;
 310
 311static void sync_serial_start_port(struct sync_port *port)
 312{
 313        reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
 314        reg_sser_rw_tr_cfg tr_cfg =
 315                REG_RD(sser, port->regi_sser, rw_tr_cfg);
 316        reg_sser_rw_rec_cfg rec_cfg =
 317                REG_RD(sser, port->regi_sser, rw_rec_cfg);
 318        cfg.en = regk_sser_yes;
 319        tr_cfg.tr_en = regk_sser_yes;
 320        rec_cfg.rec_en = regk_sser_yes;
 321        REG_WR(sser, port->regi_sser, rw_cfg, cfg);
 322        REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
 323        REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
 324        port->started = 1;
 325}
 326
 327static void __init initialize_port(int portnbr)
 328{
 329        struct sync_port *port = &ports[portnbr];
 330        reg_sser_rw_cfg cfg = { 0 };
 331        reg_sser_rw_frm_cfg frm_cfg = { 0 };
 332        reg_sser_rw_tr_cfg tr_cfg = { 0 };
 333        reg_sser_rw_rec_cfg rec_cfg = { 0 };
 334
 335        DEBUG(pr_info("Init sync serial port %d\n", portnbr));
 336
 337        port->port_nbr = portnbr;
 338        port->init_irqs = no_irq_setup;
 339
 340        port->out_rd_ptr = port->out_buffer;
 341        port->out_buf_count = 0;
 342
 343        port->output = 1;
 344        port->input = 0;
 345
 346        port->readp = port->flip;
 347        port->writep = port->flip;
 348        port->in_buffer_size = IN_BUFFER_SIZE;
 349        port->in_buffer_len = 0;
 350        port->inbufchunk = IN_DESCR_SIZE;
 351
 352        port->read_ts_idx = 0;
 353        port->write_ts_idx = 0;
 354
 355        init_waitqueue_head(&port->out_wait_q);
 356        init_waitqueue_head(&port->in_wait_q);
 357
 358        spin_lock_init(&port->lock);
 359
 360        cfg.out_clk_src = regk_sser_intern_clk;
 361        cfg.out_clk_pol = regk_sser_pos;
 362        cfg.clk_od_mode = regk_sser_no;
 363        cfg.clk_dir = regk_sser_out;
 364        cfg.gate_clk = regk_sser_no;
 365        cfg.base_freq = regk_sser_f29_493;
 366        cfg.clk_div = 256;
 367        REG_WR(sser, port->regi_sser, rw_cfg, cfg);
 368
 369        frm_cfg.wordrate = DEFAULT_WORD_RATE;
 370        frm_cfg.type = regk_sser_edge;
 371        frm_cfg.frame_pin_dir = regk_sser_out;
 372        frm_cfg.frame_pin_use = regk_sser_frm;
 373        frm_cfg.status_pin_dir = regk_sser_in;
 374        frm_cfg.status_pin_use = regk_sser_hold;
 375        frm_cfg.out_on = regk_sser_tr;
 376        frm_cfg.tr_delay = 1;
 377        REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
 378
 379        tr_cfg.urun_stop = regk_sser_no;
 380        tr_cfg.sample_size = 7;
 381        tr_cfg.sh_dir = regk_sser_msbfirst;
 382        tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
 383#if 0
 384        tr_cfg.rate_ctrl = regk_sser_bulk;
 385        tr_cfg.data_pin_use = regk_sser_dout;
 386#else
 387        tr_cfg.rate_ctrl = regk_sser_iso;
 388        tr_cfg.data_pin_use = regk_sser_dout;
 389#endif
 390        tr_cfg.bulk_wspace = 1;
 391        REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
 392
 393        rec_cfg.sample_size = 7;
 394        rec_cfg.sh_dir = regk_sser_msbfirst;
 395        rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
 396        rec_cfg.fifo_thr = regk_sser_inf;
 397        REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
 398
 399#ifdef SYNC_SER_DMA
 400        {
 401                int i;
 402                /* Setup the descriptor ring for dma out/transmit. */
 403                for (i = 0; i < NBR_OUT_DESCR; i++) {
 404                        dma_descr_data *descr = &port->out_descr[i];
 405                        descr->wait = 0;
 406                        descr->intr = 1;
 407                        descr->eol = 0;
 408                        descr->out_eop = 0;
  409                        descr->next = (dma_descr_data *)
  410                                virt_to_phys(&port->out_descr[i+1]);
 411                }
 412        }
 413
 414        /* Create a ring from the list. */
 415        port->out_descr[NBR_OUT_DESCR-1].next =
 416                (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
 417
 418        /* Setup context for traversing the ring. */
 419        port->active_tr_descr = &port->out_descr[0];
 420        port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
 421        port->catch_tr_descr = &port->out_descr[0];
 422#endif
 423}
 424
 425static inline int sync_data_avail(struct sync_port *port)
 426{
 427        return port->in_buffer_len;
 428}
 429
 430static int sync_serial_open(struct inode *inode, struct file *file)
 431{
 432        int ret = 0;
 433        int dev = iminor(inode);
 434        struct sync_port *port;
 435#ifdef SYNC_SER_DMA
 436        reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
 437        reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
 438#endif
 439
 440        DEBUG(pr_debug("Open sync serial port %d\n", dev));
 441
 442        if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
 443                DEBUG(pr_info("Invalid minor %d\n", dev));
 444                return -ENODEV;
 445        }
 446        port = &ports[dev];
  447        /* Allow opening this device twice (assuming one reader and one writer) */
 448        if (port->busy == 2) {
 449                DEBUG(pr_info("syncser%d is busy\n", dev));
 450                return -EBUSY;
 451        }
 452
 453        mutex_lock(&sync_serial_mutex);
 454
  455        /* Clear any stale data left in the flip buffer */
 456        port->readp = port->writep = port->flip;
 457        port->in_buffer_len = 0;
 458        port->read_ts_idx = 0;
 459        port->write_ts_idx = 0;
 460
 461        if (port->init_irqs != no_irq_setup) {
 462                /* Init only on first call. */
 463                port->busy++;
 464                mutex_unlock(&sync_serial_mutex);
 465                return 0;
 466        }
 467        if (port->use_dma) {
 468#ifdef SYNC_SER_DMA
 469                const char *tmp;
 470                DEBUG(pr_info("Using DMA for syncser%d\n", dev));
 471
 472                tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
 473                if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
 474                                tmp, port)) {
 475                        pr_err("Can't alloc syncser%d TX IRQ", dev);
 476                        ret = -EBUSY;
 477                        goto unlock_and_exit;
 478                }
 479                if (artpec_request_dma(port->dma_out_nbr, tmp,
 480                                DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
 481                        free_irq(port->dma_out_intr_vect, port);
 482                        pr_err("Can't alloc syncser%d TX DMA", dev);
 483                        ret = -EBUSY;
 484                        goto unlock_and_exit;
 485                }
 486                tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
 487                if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
 488                                tmp, port)) {
 489                        artpec_free_dma(port->dma_out_nbr);
 490                        free_irq(port->dma_out_intr_vect, port);
 491                        pr_err("Can't alloc syncser%d RX IRQ", dev);
 492                        ret = -EBUSY;
 493                        goto unlock_and_exit;
 494                }
 495                if (artpec_request_dma(port->dma_in_nbr, tmp,
 496                                DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
 497                        artpec_free_dma(port->dma_out_nbr);
 498                        free_irq(port->dma_out_intr_vect, port);
 499                        free_irq(port->dma_in_intr_vect, port);
 500                        pr_err("Can't alloc syncser%d RX DMA", dev);
 501                        ret = -EBUSY;
 502                        goto unlock_and_exit;
 503                }
 504                /* Enable DMAs */
 505                REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
 506                REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
 507                /* Enable DMA IRQs */
 508                REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
 509                REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
 510                /* Set up wordsize = 1 for DMAs. */
 511                DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
 512                DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
 513
 514                start_dma_in(port);
 515                port->init_irqs = dma_irq_setup;
 516#endif
 517        } else { /* !port->use_dma */
 518#ifdef SYNC_SER_MANUAL
 519                const char *tmp = dev == 0 ? "syncser0 manual irq" :
 520                                             "syncser1 manual irq";
 521                if (request_irq(port->syncser_intr_vect, manual_interrupt,
 522                                0, tmp, port)) {
 523                        pr_err("Can't alloc syncser%d manual irq",
 524                                dev);
 525                        ret = -EBUSY;
 526                        goto unlock_and_exit;
 527                }
 528                port->init_irqs = manual_irq_setup;
 529#else
 530                panic("sync_serial: Manual mode not supported\n");
 531#endif /* SYNC_SER_MANUAL */
 532        }
 533        port->busy++;
 534        ret = 0;
 535
 536unlock_and_exit:
 537        mutex_unlock(&sync_serial_mutex);
 538        return ret;
 539}
 540
 541static int sync_serial_release(struct inode *inode, struct file *file)
 542{
 543        int dev = iminor(inode);
 544        struct sync_port *port;
 545
 546        if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
 547                DEBUG(pr_info("Invalid minor %d\n", dev));
 548                return -ENODEV;
 549        }
 550        port = &ports[dev];
 551        if (port->busy)
 552                port->busy--;
 553        if (!port->busy)
 554                /* XXX */;
 555        return 0;
 556}
 557
 558static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
 559{
 560        int dev = iminor(file_inode(file));
 561        unsigned int mask = 0;
 562        struct sync_port *port;
 563        DEBUGPOLL(
 564        static unsigned int prev_mask;
 565        );
 566
 567        port = &ports[dev];
 568
 569        if (!port->started)
 570                sync_serial_start_port(port);
 571
 572        poll_wait(file, &port->out_wait_q, wait);
 573        poll_wait(file, &port->in_wait_q, wait);
 574
 575        /* No active transfer, descriptors are available */
 576        if (port->output && !port->tr_running)
 577                mask |= POLLOUT | POLLWRNORM;
 578
 579        /* Descriptor and buffer space available. */
 580        if (port->output &&
 581            port->active_tr_descr != port->catch_tr_descr &&
 582            port->out_buf_count < OUT_BUFFER_SIZE)
 583                mask |=  POLLOUT | POLLWRNORM;
 584
 585        /* At least an inbufchunk of data */
 586        if (port->input && sync_data_avail(port) >= port->inbufchunk)
 587                mask |= POLLIN | POLLRDNORM;
 588
 589        DEBUGPOLL(
 590        if (mask != prev_mask)
 591                pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
 592                        mask,
 593                        mask & POLLOUT ? "POLLOUT" : "",
 594                        mask & POLLIN ? "POLLIN" : "");
  595        prev_mask = mask;
 596        );
 597        return mask;
 598}
 599
 600static ssize_t __sync_serial_read(struct file *file,
 601                                  char __user *buf,
 602                                  size_t count,
 603                                  loff_t *ppos,
 604                                  struct timespec *ts)
 605{
 606        unsigned long flags;
  607        int dev = iminor(file_inode(file));
 608        int avail;
 609        struct sync_port *port;
 610        unsigned char *start;
 611        unsigned char *end;
 612
 613        if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
 614                DEBUG(pr_info("Invalid minor %d\n", dev));
 615                return -ENODEV;
 616        }
 617        port = &ports[dev];
 618
 619        if (!port->started)
 620                sync_serial_start_port(port);
 621
 622        /* Calculate number of available bytes */
  623        /* Snapshot the pointers so an interrupt can't modify them under us */
 624        spin_lock_irqsave(&port->lock, flags);
 625        start = port->readp;
 626        end = port->writep;
 627        spin_unlock_irqrestore(&port->lock, flags);
 628
 629        while ((start == end) && !port->in_buffer_len) {
 630                if (file->f_flags & O_NONBLOCK)
 631                        return -EAGAIN;
 632
 633                wait_event_interruptible(port->in_wait_q,
 634                                         !(start == end && !port->full));
 635
 636                if (signal_pending(current))
 637                        return -EINTR;
 638
 639                spin_lock_irqsave(&port->lock, flags);
 640                start = port->readp;
 641                end = port->writep;
 642                spin_unlock_irqrestore(&port->lock, flags);
 643        }
 644
 645        DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
 646                          dev, count,
 647                          start - port->flip, end - port->flip,
 648                          port->in_buffer_size));
 649
 650        /* Lazy read, never return wrapped data. */
 651        if (end > start)
 652                avail = end - start;
 653        else
 654                avail = port->flip + port->in_buffer_size - start;
 655
 656        count = count > avail ? avail : count;
 657        if (copy_to_user(buf, start, count))
 658                return -EFAULT;
 659
 660        /* If timestamp requested, find timestamp of first returned byte
 661         * and copy it.
  662         * N.B.: Applications that request timestamps MUST read data in
 663         * chunks that are multiples of IN_DESCR_SIZE.
 664         * Otherwise the timestamps will not be aligned to the data read.
 665         */
 666        if (ts != NULL) {
 667                int idx = port->read_ts_idx;
 668                memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
 669                port->read_ts_idx += count / IN_DESCR_SIZE;
 670                if (port->read_ts_idx >= NBR_IN_DESCR)
 671                        port->read_ts_idx = 0;
 672        }
 673
 674        spin_lock_irqsave(&port->lock, flags);
 675        port->readp += count;
 676        /* Check for wrap */
 677        if (port->readp >= port->flip + port->in_buffer_size)
 678                port->readp = port->flip;
 679        port->in_buffer_len -= count;
 680        port->full = 0;
 681        spin_unlock_irqrestore(&port->lock, flags);
 682
 683        DEBUGREAD(pr_info("r %d\n", count));
 684
 685        return count;
 686}
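/*
 * A minimal user-space sketch (assumption: fd is an open descriptor for a
 * syncser device node).  Because of the lazy read above, read() may return
 * less than requested; sync_serial_poll() reports POLLIN once at least one
 * inbufchunk of data is buffered.
 */
#if 0	/* user-space example, not built with the driver */
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        unsigned char buf[256];
        ssize_t n = 0;

        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
                n = read(fd, buf, sizeof(buf)); /* may be a short read */
#endif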
 687
 688static ssize_t sync_serial_input(struct file *file, unsigned long arg)
 689{
 690        struct ssp_request req;
 691        int count;
 692        int ret;
 693
 694        /* Copy the request structure from user-mode. */
 695        ret = copy_from_user(&req, (struct ssp_request __user *)arg,
 696                sizeof(struct ssp_request));
 697
 698        if (ret) {
 699                DEBUG(pr_info("sync_serial_input copy from user failed\n"));
 700                return -EFAULT;
 701        }
 702
 703        /* To get the timestamps aligned, make sure that 'len'
 704         * is a multiple of IN_DESCR_SIZE.
 705         */
 706        if ((req.len % IN_DESCR_SIZE) != 0) {
 707                DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
 708                              req.len, IN_DESCR_SIZE));
 709                return -EFAULT;
 710        }
 711
 712        /* Do the actual read. */
 713        /* Note that req.buf is actually a pointer to user space. */
 714        count = __sync_serial_read(file, req.buf, req.len,
 715                                   NULL, &req.ts);
 716
 717        if (count < 0) {
 718                DEBUG(pr_info("sync_serial_input read failed\n"));
 719                return count;
 720        }
 721
 722        /* Copy the request back to user-mode. */
 723        ret = copy_to_user((struct ssp_request __user *)arg, &req,
 724                sizeof(struct ssp_request));
 725
 726        if (ret) {
 727                DEBUG(pr_info("syncser input copy2user failed\n"));
 728                return -EFAULT;
 729        }
 730
 731        /* Return the number of bytes read. */
 732        return count;
 733}
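/*
 * A minimal user-space sketch of the SSP_INPUT path above (assumptions:
 * struct ssp_request, SSP_INPUT and SSP_INPUT_CHUNK_SIZE are visible to
 * user space via <asm/sync_serial.h>, and fd is an open syncser node).
 * req.len must be a multiple of IN_DESCR_SIZE so that the returned
 * timestamp stays aligned with the data.
 */
#if 0	/* user-space example, not built with the driver */
        struct ssp_request req;
        char buf[SSP_INPUT_CHUNK_SIZE * 2];
        int n;

        req.buf = buf;
        req.len = sizeof(buf);          /* multiple of the input chunk size */
        n = ioctl(fd, SSP_INPUT, &req); /* returns the number of bytes read */
        if (n >= 0)
                printf("first byte sampled at %ld.%09ld\n",
                       (long)req.ts.tv_sec, req.ts.tv_nsec);
#endif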
 734
 735
 736static int sync_serial_ioctl_unlocked(struct file *file,
 737                                      unsigned int cmd, unsigned long arg)
 738{
 739        int return_val = 0;
 740        int dma_w_size = regk_dma_set_w_size1;
 741        int dev = iminor(file_inode(file));
 742        struct sync_port *port;
 743        reg_sser_rw_tr_cfg tr_cfg;
 744        reg_sser_rw_rec_cfg rec_cfg;
 745        reg_sser_rw_frm_cfg frm_cfg;
 746        reg_sser_rw_cfg gen_cfg;
 747        reg_sser_rw_intr_mask intr_mask;
 748
 749        if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
 750                DEBUG(pr_info("Invalid minor %d\n", dev));
  751                return -ENODEV;
 752        }
 753
 754        if (cmd == SSP_INPUT)
 755                return sync_serial_input(file, arg);
 756
 757        port = &ports[dev];
 758        spin_lock_irq(&port->lock);
 759
 760        tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
 761        rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
 762        frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
 763        gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
 764        intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
 765
 766        switch (cmd) {
 767        case SSP_SPEED:
 768                if (GET_SPEED(arg) == CODEC) {
 769                        unsigned int freq;
 770
 771                        gen_cfg.base_freq = regk_sser_f32;
 772
 773                        /* Clock divider will internally be
 774                         * gen_cfg.clk_div + 1.
 775                         */
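                        /*
                         * For example: the resulting clock is
                         * base_freq / (clk_div + 1).  FREQ_256kHz below
                         * gives clk_div = 125 * 1 - 1 = 124, i.e.
                         * 32 MHz / 125 = 256 kHz.  The asynchronous rates
                         * further down follow the same formula, e.g.
                         * SSP9600: 29493000 / (9600 * 8) - 1 = 383,
                         * i.e. roughly 76.8 kHz.
                         */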
 776
 777                        freq = GET_FREQ(arg);
 778                        switch (freq) {
 779                        case FREQ_32kHz:
 780                        case FREQ_64kHz:
 781                        case FREQ_128kHz:
 782                        case FREQ_256kHz:
 783                                gen_cfg.clk_div = 125 *
 784                                        (1 << (freq - FREQ_256kHz)) - 1;
 785                                break;
 786                        case FREQ_512kHz:
 787                                gen_cfg.clk_div = 62;
 788                                break;
 789                        case FREQ_1MHz:
 790                        case FREQ_2MHz:
 791                        case FREQ_4MHz:
 792                                gen_cfg.clk_div = 8 * (1 << freq) - 1;
 793                                break;
 794                        }
 795                } else if (GET_SPEED(arg) == CODEC_f32768) {
 796                        gen_cfg.base_freq = regk_sser_f32_768;
 797                        switch (GET_FREQ(arg)) {
 798                        case FREQ_4096kHz:
 799                                gen_cfg.clk_div = 7;
 800                                break;
 801                        default:
 802                                spin_unlock_irq(&port->lock);
 803                                return -EINVAL;
 804                        }
 805                } else {
 806                        gen_cfg.base_freq = regk_sser_f29_493;
 807                        switch (GET_SPEED(arg)) {
 808                        case SSP150:
 809                                gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
 810                                break;
 811                        case SSP300:
 812                                gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
 813                                break;
 814                        case SSP600:
 815                                gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
 816                                break;
 817                        case SSP1200:
 818                                gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
 819                                break;
 820                        case SSP2400:
 821                                gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
 822                                break;
 823                        case SSP4800:
 824                                gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
 825                                break;
 826                        case SSP9600:
 827                                gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
 828                                break;
 829                        case SSP19200:
 830                                gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
 831                                break;
 832                        case SSP28800:
 833                                gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
 834                                break;
 835                        case SSP57600:
 836                                gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
 837                                break;
 838                        case SSP115200:
 839                                gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
 840                                break;
 841                        case SSP230400:
 842                                gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
 843                                break;
 844                        case SSP460800:
 845                                gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
 846                                break;
 847                        case SSP921600:
 848                                gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
 849                                break;
 850                        case SSP3125000:
 851                                gen_cfg.base_freq = regk_sser_f100;
 852                                gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
 853                                break;
 854
 855                        }
 856                }
 857                frm_cfg.wordrate = GET_WORD_RATE(arg);
 858
 859                break;
 860        case SSP_MODE:
 861                switch (arg) {
 862                case MASTER_OUTPUT:
 863                        port->output = 1;
 864                        port->input = 0;
 865                        frm_cfg.out_on = regk_sser_tr;
 866                        frm_cfg.frame_pin_dir = regk_sser_out;
 867                        gen_cfg.clk_dir = regk_sser_out;
 868                        break;
 869                case SLAVE_OUTPUT:
 870                        port->output = 1;
 871                        port->input = 0;
 872                        frm_cfg.frame_pin_dir = regk_sser_in;
 873                        gen_cfg.clk_dir = regk_sser_in;
 874                        break;
 875                case MASTER_INPUT:
 876                        port->output = 0;
 877                        port->input = 1;
 878                        frm_cfg.frame_pin_dir = regk_sser_out;
 879                        frm_cfg.out_on = regk_sser_intern_tb;
 880                        gen_cfg.clk_dir = regk_sser_out;
 881                        break;
 882                case SLAVE_INPUT:
 883                        port->output = 0;
 884                        port->input = 1;
 885                        frm_cfg.frame_pin_dir = regk_sser_in;
 886                        gen_cfg.clk_dir = regk_sser_in;
 887                        break;
 888                case MASTER_BIDIR:
 889                        port->output = 1;
 890                        port->input = 1;
 891                        frm_cfg.frame_pin_dir = regk_sser_out;
 892                        frm_cfg.out_on = regk_sser_intern_tb;
 893                        gen_cfg.clk_dir = regk_sser_out;
 894                        break;
 895                case SLAVE_BIDIR:
 896                        port->output = 1;
 897                        port->input = 1;
 898                        frm_cfg.frame_pin_dir = regk_sser_in;
 899                        gen_cfg.clk_dir = regk_sser_in;
 900                        break;
 901                default:
 902                        spin_unlock_irq(&port->lock);
 903                        return -EINVAL;
 904                }
 905                if (!port->use_dma || arg == MASTER_OUTPUT ||
 906                                arg == SLAVE_OUTPUT)
 907                        intr_mask.rdav = regk_sser_yes;
 908                break;
 909        case SSP_FRAME_SYNC:
 910                if (arg & NORMAL_SYNC) {
 911                        frm_cfg.rec_delay = 1;
 912                        frm_cfg.tr_delay = 1;
  913                } else if (arg & EARLY_SYNC) {
  914                        frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
  915                } else if (arg & LATE_SYNC) {
 916                        frm_cfg.tr_delay = 2;
 917                        frm_cfg.rec_delay = 2;
 918                } else if (arg & SECOND_WORD_SYNC) {
 919                        frm_cfg.rec_delay = 7;
 920                        frm_cfg.tr_delay = 1;
 921                }
 922
 923                tr_cfg.bulk_wspace = frm_cfg.tr_delay;
 924                frm_cfg.early_wend = regk_sser_yes;
 925                if (arg & BIT_SYNC)
 926                        frm_cfg.type = regk_sser_edge;
 927                else if (arg & WORD_SYNC)
 928                        frm_cfg.type = regk_sser_level;
 929                else if (arg & EXTENDED_SYNC)
 930                        frm_cfg.early_wend = regk_sser_no;
 931
 932                if (arg & SYNC_ON)
 933                        frm_cfg.frame_pin_use = regk_sser_frm;
 934                else if (arg & SYNC_OFF)
 935                        frm_cfg.frame_pin_use = regk_sser_gio0;
 936
 937                dma_w_size = regk_dma_set_w_size2;
 938                if (arg & WORD_SIZE_8) {
 939                        rec_cfg.sample_size = tr_cfg.sample_size = 7;
 940                        dma_w_size = regk_dma_set_w_size1;
 941                } else if (arg & WORD_SIZE_12)
 942                        rec_cfg.sample_size = tr_cfg.sample_size = 11;
 943                else if (arg & WORD_SIZE_16)
 944                        rec_cfg.sample_size = tr_cfg.sample_size = 15;
 945                else if (arg & WORD_SIZE_24)
 946                        rec_cfg.sample_size = tr_cfg.sample_size = 23;
 947                else if (arg & WORD_SIZE_32)
 948                        rec_cfg.sample_size = tr_cfg.sample_size = 31;
 949
 950                if (arg & BIT_ORDER_MSB)
 951                        rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
 952                else if (arg & BIT_ORDER_LSB)
 953                        rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
 954
 955                if (arg & FLOW_CONTROL_ENABLE) {
 956                        frm_cfg.status_pin_use = regk_sser_frm;
 957                        rec_cfg.fifo_thr = regk_sser_thr16;
 958                } else if (arg & FLOW_CONTROL_DISABLE) {
 959                        frm_cfg.status_pin_use = regk_sser_gio0;
 960                        rec_cfg.fifo_thr = regk_sser_inf;
 961                }
 962
 963                if (arg & CLOCK_NOT_GATED)
 964                        gen_cfg.gate_clk = regk_sser_no;
 965                else if (arg & CLOCK_GATED)
 966                        gen_cfg.gate_clk = regk_sser_yes;
 967
 968                break;
 969        case SSP_IPOLARITY:
 970                /* NOTE!! negedge is considered NORMAL */
 971                if (arg & CLOCK_NORMAL)
 972                        rec_cfg.clk_pol = regk_sser_neg;
 973                else if (arg & CLOCK_INVERT)
 974                        rec_cfg.clk_pol = regk_sser_pos;
 975
 976                if (arg & FRAME_NORMAL)
 977                        frm_cfg.level = regk_sser_pos_hi;
 978                else if (arg & FRAME_INVERT)
 979                        frm_cfg.level = regk_sser_neg_lo;
 980
 981                if (arg & STATUS_NORMAL)
 982                        gen_cfg.hold_pol = regk_sser_pos;
 983                else if (arg & STATUS_INVERT)
 984                        gen_cfg.hold_pol = regk_sser_neg;
 985                break;
 986        case SSP_OPOLARITY:
 987                if (arg & CLOCK_NORMAL)
 988                        gen_cfg.out_clk_pol = regk_sser_pos;
 989                else if (arg & CLOCK_INVERT)
 990                        gen_cfg.out_clk_pol = regk_sser_neg;
 991
 992                if (arg & FRAME_NORMAL)
 993                        frm_cfg.level = regk_sser_pos_hi;
 994                else if (arg & FRAME_INVERT)
 995                        frm_cfg.level = regk_sser_neg_lo;
 996
 997                if (arg & STATUS_NORMAL)
 998                        gen_cfg.hold_pol = regk_sser_pos;
 999                else if (arg & STATUS_INVERT)
1000                        gen_cfg.hold_pol = regk_sser_neg;
1001                break;
1002        case SSP_SPI:
1003                rec_cfg.fifo_thr = regk_sser_inf;
1004                rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
1005                rec_cfg.sample_size = tr_cfg.sample_size = 7;
1006                frm_cfg.frame_pin_use = regk_sser_frm;
1007                frm_cfg.type = regk_sser_level;
1008                frm_cfg.tr_delay = 1;
1009                frm_cfg.level = regk_sser_neg_lo;
1010                if (arg & SPI_SLAVE) {
1011                        rec_cfg.clk_pol = regk_sser_neg;
1012                        gen_cfg.clk_dir = regk_sser_in;
1013                        port->input = 1;
1014                        port->output = 0;
1015                } else {
1016                        gen_cfg.out_clk_pol = regk_sser_pos;
1017                        port->input = 0;
1018                        port->output = 1;
1019                        gen_cfg.clk_dir = regk_sser_out;
1020                }
1021                break;
1022        case SSP_INBUFCHUNK:
1023                break;
1024        default:
1025                return_val = -1;
1026        }
1027
1028
1029        if (port->started) {
1030                rec_cfg.rec_en = port->input;
1031                gen_cfg.en = (port->output | port->input);
1032        }
1033
1034        REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1035        REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1036        REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
1037        REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1038        REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
1039
1040
1041        if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
1042                        WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
1043                int en = gen_cfg.en;
1044                gen_cfg.en = 0;
1045                REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
 1046                /* ##### Should DMA be stopped before we change dma size? */
1047                DMA_WR_CMD(port->regi_dmain, dma_w_size);
1048                DMA_WR_CMD(port->regi_dmaout, dma_w_size);
1049                gen_cfg.en = en;
1050                REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
1051        }
1052
1053        spin_unlock_irq(&port->lock);
1054        return return_val;
1055}
1056
1057static long sync_serial_ioctl(struct file *file,
1058                unsigned int cmd, unsigned long arg)
1059{
1060        long ret;
1061
1062        mutex_lock(&sync_serial_mutex);
1063        ret = sync_serial_ioctl_unlocked(file, cmd, arg);
1064        mutex_unlock(&sync_serial_mutex);
1065
1066        return ret;
1067}
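/*
 * A minimal user-space configuration sketch (assumptions: the SSP_* ioctls
 * and flags come from <asm/sync_serial.h> and fd is an open syncser node;
 * the SSP_SPEED argument packing used with GET_SPEED()/GET_FREQ()/
 * GET_WORD_RATE() is not shown).  Each call lands in one case of
 * sync_serial_ioctl_unlocked() above.
 */
#if 0	/* user-space example, not built with the driver */
        ioctl(fd, SSP_MODE, MASTER_OUTPUT);
        ioctl(fd, SSP_FRAME_SYNC, NORMAL_SYNC | WORD_SYNC | WORD_SIZE_8 |
                                  BIT_ORDER_MSB | SYNC_ON |
                                  FLOW_CONTROL_DISABLE | CLOCK_NOT_GATED);
        ioctl(fd, SSP_OPOLARITY, CLOCK_NORMAL | FRAME_NORMAL | STATUS_NORMAL);
#endif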
1068
1069/* NOTE: sync_serial_write does not support concurrency */
1070static ssize_t sync_serial_write(struct file *file, const char __user *buf,
1071                                 size_t count, loff_t *ppos)
1072{
1073        int dev = iminor(file_inode(file));
1074        DECLARE_WAITQUEUE(wait, current);
1075        struct sync_port *port;
1076        int trunc_count;
1077        unsigned long flags;
1078        int bytes_free;
1079        int out_buf_count;
1080
1081        unsigned char *rd_ptr;       /* First allocated byte in the buffer */
1082        unsigned char *wr_ptr;       /* First free byte in the buffer */
1083        unsigned char *buf_stop_ptr; /* Last byte + 1 */
1084
1085        if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
1086                DEBUG(pr_info("Invalid minor %d\n", dev));
1087                return -ENODEV;
1088        }
1089        port = &ports[dev];
1090
1091        /* |<-         OUT_BUFFER_SIZE                          ->|
1092         *           |<- out_buf_count ->|
1093         *                               |<- trunc_count ->| ...->|
1094         *  ______________________________________________________
1095         * |  free   |   data            | free                   |
1096         * |_________|___________________|________________________|
1097         *           ^ rd_ptr            ^ wr_ptr
1098         */
1099        DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
1100                           port->port_nbr, count, port->active_tr_descr,
1101                           port->catch_tr_descr));
1102
1103        /* Read variables that may be updated by interrupts */
1104        spin_lock_irqsave(&port->lock, flags);
1105        rd_ptr = port->out_rd_ptr;
1106        out_buf_count = port->out_buf_count;
1107        spin_unlock_irqrestore(&port->lock, flags);
1108
1109        /* Check if resources are available */
1110        if (port->tr_running &&
1111            ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1112             out_buf_count >= OUT_BUFFER_SIZE)) {
1113                DEBUGWRITE(pr_info("sser%d full\n", dev));
1114                return -EAGAIN;
1115        }
1116
1117        buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
1118
1119        /* Determine pointer to the first free byte, before copying. */
1120        wr_ptr = rd_ptr + out_buf_count;
1121        if (wr_ptr >= buf_stop_ptr)
1122                wr_ptr -= OUT_BUFFER_SIZE;
1123
 1124        /* If the write would wrap the ring buffer, truncate it and let the
 1125         * user space program handle the remainder. This could be done more
 1126         * elegantly, but small buffer fragments may occur.
 1127         */
1128        bytes_free = OUT_BUFFER_SIZE - out_buf_count;
1129        if (wr_ptr + bytes_free > buf_stop_ptr)
1130                bytes_free = buf_stop_ptr - wr_ptr;
1131        trunc_count = (count < bytes_free) ? count : bytes_free;
1132
1133        if (copy_from_user(wr_ptr, buf, trunc_count))
1134                return -EFAULT;
1135
1136        DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d     %p %p %p\n",
1137                            out_buf_count, trunc_count,
1138                            port->out_buf_count, port->out_buffer,
1139                            wr_ptr, buf_stop_ptr));
1140
1141        /* Make sure transmitter/receiver is running */
1142        if (!port->started) {
1143                reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1144                reg_sser_rw_rec_cfg rec_cfg =
1145                        REG_RD(sser, port->regi_sser, rw_rec_cfg);
1146                cfg.en = regk_sser_yes;
1147                rec_cfg.rec_en = port->input;
1148                REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1149                REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1150                port->started = 1;
1151        }
1152
1153        /* Setup wait if blocking */
1154        if (!(file->f_flags & O_NONBLOCK)) {
1155                add_wait_queue(&port->out_wait_q, &wait);
1156                set_current_state(TASK_INTERRUPTIBLE);
1157        }
1158
1159        spin_lock_irqsave(&port->lock, flags);
1160        port->out_buf_count += trunc_count;
1161        if (port->use_dma) {
1162#ifdef SYNC_SER_DMA
1163                start_dma_out(port, wr_ptr, trunc_count);
1164#endif
1165        } else if (!port->tr_running) {
1166#ifdef SYNC_SER_MANUAL
1167                reg_sser_rw_intr_mask intr_mask;
1168                intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1169                /* Start sender by writing data */
1170                send_word(port);
1171                /* and enable transmitter ready IRQ */
1172                intr_mask.trdy = 1;
1173                REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1174#endif
1175        }
1176        spin_unlock_irqrestore(&port->lock, flags);
1177
1178        /* Exit if non blocking */
1179        if (file->f_flags & O_NONBLOCK) {
1180                DEBUGWRITE(pr_info("w d%d c %u  %08x\n",
1181                                   port->port_nbr, trunc_count,
1182                                   REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1183                return trunc_count;
1184        }
1185
1186        schedule();
1187        remove_wait_queue(&port->out_wait_q, &wait);
1188
1189        if (signal_pending(current))
1190                return -EINTR;
1191
1192        DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
1193        return trunc_count;
1194}
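/*
 * A minimal user-space sketch (assumptions: fd, data and len are the
 * caller's descriptor, buffer and length).  Because the driver truncates
 * at the end of out_buffer rather than wrapping, even a blocking write()
 * may accept less than count, so callers should loop over the remainder.
 */
#if 0	/* user-space example, not built with the driver */
        const char *p = data;
        size_t left = len;

        while (left > 0) {
                ssize_t n = write(fd, p, left);
                if (n < 0)
                        break;          /* EAGAIN/EINTR handling omitted */
                p += n;
                left -= n;
        }
#endif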
1195
1196static ssize_t sync_serial_read(struct file *file, char __user *buf,
1197                                size_t count, loff_t *ppos)
1198{
1199        return __sync_serial_read(file, buf, count, ppos, NULL);
1200}
1201
1202#ifdef SYNC_SER_MANUAL
1203static void send_word(struct sync_port *port)
1204{
1205        reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1206        reg_sser_rw_tr_data tr_data =  {0};
1207
1208        switch (tr_cfg.sample_size) {
1209        case 8:
1210                port->out_buf_count--;
1211                tr_data.data = *port->out_rd_ptr++;
1212                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1213                if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1214                        port->out_rd_ptr = port->out_buffer;
1215                break;
1216        case 12:
1217        {
1218                int data = (*port->out_rd_ptr++) << 8;
1219                data |= *port->out_rd_ptr++;
1220                port->out_buf_count -= 2;
1221                tr_data.data = data;
1222                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1223                if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1224                        port->out_rd_ptr = port->out_buffer;
1225                break;
1226        }
1227        case 16:
1228                port->out_buf_count -= 2;
1229                tr_data.data = *(unsigned short *)port->out_rd_ptr;
1230                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1231                port->out_rd_ptr += 2;
1232                if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1233                        port->out_rd_ptr = port->out_buffer;
1234                break;
1235        case 24:
1236                port->out_buf_count -= 3;
1237                tr_data.data = *(unsigned short *)port->out_rd_ptr;
1238                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1239                port->out_rd_ptr += 2;
1240                tr_data.data = *port->out_rd_ptr++;
1241                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1242                if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1243                        port->out_rd_ptr = port->out_buffer;
1244                break;
1245        case 32:
1246                port->out_buf_count -= 4;
1247                tr_data.data = *(unsigned short *)port->out_rd_ptr;
1248                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1249                port->out_rd_ptr += 2;
1250                tr_data.data = *(unsigned short *)port->out_rd_ptr;
1251                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1252                port->out_rd_ptr += 2;
1253                if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1254                        port->out_rd_ptr = port->out_buffer;
1255                break;
1256        }
1257}
1258#endif
1259
1260#ifdef SYNC_SER_DMA
1261static void start_dma_out(struct sync_port *port, const char *data, int count)
1262{
1263        port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
1264        port->active_tr_descr->after = port->active_tr_descr->buf + count;
1265        port->active_tr_descr->intr = 1;
1266
1267        port->active_tr_descr->eol = 1;
1268        port->prev_tr_descr->eol = 0;
1269
1270        DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
1271                port->prev_tr_descr, port->active_tr_descr));
1272        port->prev_tr_descr = port->active_tr_descr;
1273        port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
1274
1275        if (!port->tr_running) {
1276                reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1277                        rw_tr_cfg);
1278
1279                port->out_context.next = NULL;
1280                port->out_context.saved_data =
1281                        (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1282                port->out_context.saved_data_buf = port->prev_tr_descr->buf;
1283
1284                DMA_START_CONTEXT(port->regi_dmaout,
1285                        virt_to_phys((char *)&port->out_context));
1286
1287                tr_cfg.tr_en = regk_sser_yes;
1288                REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1289                DEBUGTRDMA(pr_info("dma s\n"););
1290        } else {
1291                DMA_CONTINUE_DATA(port->regi_dmaout);
1292                DEBUGTRDMA(pr_info("dma c\n"););
1293        }
1294
1295        port->tr_running = 1;
1296}
1297
1298static void start_dma_in(struct sync_port *port)
1299{
1300        int i;
1301        char *buf;
1302        unsigned long flags;
1303        spin_lock_irqsave(&port->lock, flags);
1304        port->writep = port->flip;
1305        spin_unlock_irqrestore(&port->lock, flags);
1306
1307        buf = (char *)virt_to_phys(port->in_buffer);
1308        for (i = 0; i < NBR_IN_DESCR; i++) {
1309                port->in_descr[i].buf = buf;
1310                port->in_descr[i].after = buf + port->inbufchunk;
1311                port->in_descr[i].intr = 1;
1312                port->in_descr[i].next =
1313                        (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
1315                buf += port->inbufchunk;
1316        }
1317        /* Link the last descriptor to the first */
1318        port->in_descr[i-1].next =
1319                (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
1320        port->in_descr[i-1].eol = regk_sser_yes;
1321        port->next_rx_desc = &port->in_descr[0];
1322        port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1323        port->in_context.saved_data =
1324                (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
1325        port->in_context.saved_data_buf = port->in_descr[0].buf;
1326        DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1327}
1328
1329static irqreturn_t tr_interrupt(int irq, void *dev_id)
1330{
1331        reg_dma_r_masked_intr masked;
1332        reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
1333        reg_dma_rw_stat stat;
1334        int i;
1335        int found = 0;
1336        int stop_sser = 0;
1337
1338        for (i = 0; i < NBR_PORTS; i++) {
1339                struct sync_port *port = &ports[i];
1340                if (!port->enabled || !port->use_dma)
1341                        continue;
1342
1343                /* IRQ active for the port? */
1344                masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1345                if (!masked.data)
1346                        continue;
1347
1348                found = 1;
1349
1350                /* Check if we should stop the DMA transfer */
1351                stat = REG_RD(dma, port->regi_dmaout, rw_stat);
1352                if (stat.list_state == regk_dma_data_at_eol)
1353                        stop_sser = 1;
1354
1355                /* Clear IRQ */
1356                REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1357
1358                if (!stop_sser) {
1359                        /* The DMA has completed a descriptor and EOL was
1360                         * not encountered, so step the relevant descriptor
1361                         * and data pointers forward. */
1362                        int sent;
1363                        sent = port->catch_tr_descr->after -
1364                                port->catch_tr_descr->buf;
1365                        DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
1366                                           "in descr %p (ac: %p)\n",
1367                                           port->out_buf_count, sent,
1368                                           port->out_buf_count - sent,
1369                                           port->catch_tr_descr,
1370                                           port->active_tr_descr););
1371                        port->out_buf_count -= sent;
1372                        port->catch_tr_descr =
1373                                phys_to_virt((int) port->catch_tr_descr->next);
1374                        port->out_rd_ptr =
1375                                phys_to_virt((int) port->catch_tr_descr->buf);
1376                } else {
1377                        reg_sser_rw_tr_cfg tr_cfg;
1378                        int j, sent;
1379                        /* EOL handler.
1380                         * Note that if an EOL was encountered during the
1381                         * IRQ-locked section of sync_ser_write, the DMA
1382                         * will be restarted and the eol flag cleared.
1383                         * The remaining descriptors will then be traversed
1384                         * by the descriptor interrupts as usual.
1385                         */
1386                        j = 0;
1387                        while (!port->catch_tr_descr->eol) {
1388                                sent = port->catch_tr_descr->after -
1389                                        port->catch_tr_descr->buf;
1390                                DEBUGOUTBUF(pr_info(
1391                                        "traversing descr %p -%d (%d)\n",
1392                                        port->catch_tr_descr,
1393                                        sent,
1394                                        port->out_buf_count));
1395                                port->out_buf_count -= sent;
1396                                port->catch_tr_descr = phys_to_virt(
1397                                        (int)port->catch_tr_descr->next);
1398                                j++;
1399                                if (j >= NBR_OUT_DESCR) {
1400                                        /* TODO: Reset and recover */
1401                                        panic("sync_serial: missing eol");
1402                                }
1403                        }
1404                        sent = port->catch_tr_descr->after -
1405                                port->catch_tr_descr->buf;
1406                        DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
1407                                port->catch_tr_descr,
1408                                sent,
1409                                port->out_buf_count));
1410
1411                        port->out_buf_count -= sent;
1412
1413                        /* Update the read pointer to the first free byte;
1414                         * we may already be writing data there. */
1415                        port->out_rd_ptr =
1416                                phys_to_virt((int) port->catch_tr_descr->after);
1417                        if (port->out_rd_ptr > port->out_buffer +
1418                                        OUT_BUFFER_SIZE)
1419                                port->out_rd_ptr = port->out_buffer;
1420
1421                        tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1422                        DEBUGTXINT(pr_info(
1423                                "tr_int DMA stop %d, set catch @ %p\n",
1424                                port->out_buf_count,
1425                                port->active_tr_descr));
1426                        if (port->out_buf_count != 0)
1427                                pr_err("sync_ser: buf not empty after eol\n");
1428                        port->catch_tr_descr = port->active_tr_descr;
1429                        port->tr_running = 0;
1430                        tr_cfg.tr_en = regk_sser_no;
1431                        REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1432                }
1433                /* wake up the waiting process */
1434                wake_up_interruptible(&port->out_wait_q);
1435        }
1436        return IRQ_RETVAL(found);
1437} /* tr_interrupt */
1438
1439
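/*
 * Handle one RX descriptor the DMA has filled: copy the chunk into the flip
 * buffer (or drop it on overrun), timestamp it, move the eol marker one step
 * around the ring and let the DMA continue.
 */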
1440static inline void handle_rx_packet(struct sync_port *port)
1441{
1442        int idx;
1443        reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
1444        unsigned long flags;
1445
1446        DEBUGRXINT(pr_info("!"));
1447        spin_lock_irqsave(&port->lock, flags);
1448
1449        /* If we overrun, the user experience is poor no matter whether we
1450         * drop new or old data.  It is much easier to get it right when
1451         * dropping new data, so let's do that.
1452         */
1453        if ((port->writep + port->inbufchunk <=
1454             port->flip + port->in_buffer_size) &&
1455            (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
1456                memcpy(port->writep,
1457                       phys_to_virt((unsigned)port->next_rx_desc->buf),
1458                       port->inbufchunk);
1459                port->writep += port->inbufchunk;
1460                if (port->writep >= port->flip + port->in_buffer_size)
1461                        port->writep = port->flip;
1462
1463                /* Timestamp the new data chunk. */
1464                if (port->write_ts_idx == NBR_IN_DESCR)
1465                        port->write_ts_idx = 0;
1466                idx = port->write_ts_idx++;
1467                ktime_get_ts(&port->timestamp[idx]);
1468                port->in_buffer_len += port->inbufchunk;
1469        }
1470        spin_unlock_irqrestore(&port->lock, flags);
1471
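        /* Recycle the descriptor just handled: it becomes the new end of the
         * ring (eol) while the old eol descriptor is opened up for the DMA. */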
1472        port->next_rx_desc->eol = 1;
1473        port->prev_rx_desc->eol = 0;
1474        /* Cache bug workaround */
1475        flush_dma_descr(port->prev_rx_desc, 0);
1476        port->prev_rx_desc = port->next_rx_desc;
1477        port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1478        /* Cache bug workaround */
1479        flush_dma_descr(port->prev_rx_desc, 1);
1480        /* wake up the waiting process */
1481        wake_up_interruptible(&port->in_wait_q);
1482        DMA_CONTINUE(port->regi_dmain);
1483        REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1484
1485}
1486
1487static irqreturn_t rx_interrupt(int irq, void *dev_id)
1488{
1489        reg_dma_r_masked_intr masked;
1490
1491        int i;
1492        int found = 0;
1493
1494        DEBUG(pr_info("rx_interrupt\n"));
1495
1496        for (i = 0; i < NBR_PORTS; i++) {
1497                struct sync_port *port = &ports[i];
1498
1499                if (!port->enabled || !port->use_dma)
1500                        continue;
1501
1502                masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1503
1504                if (!masked.data)
1505                        continue;
1506
1507                /* Descriptor interrupt */
1508                found = 1;
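                /* rw_data holds the physical address of the descriptor the
                 * channel is currently filling; everything between
                 * next_rx_desc and that point is complete and can be
                 * handled. */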
1509                while (REG_RD(dma, port->regi_dmain, rw_data) !=
1510                                virt_to_phys(port->next_rx_desc))
1511                        handle_rx_packet(port);
1512        }
1513        return IRQ_RETVAL(found);
1514} /* rx_interrupt */
1515#endif /* SYNC_SER_DMA */
1516
1517#ifdef SYNC_SER_MANUAL
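/*
 * Interrupt handler for ports running without DMA: the sser block raises rdav
 * for every received word and trdy whenever it can accept a new transmit
 * word.  Received words are unpacked according to the configured sample size
 * into the flip buffer; transmit words are fed one word at a time by
 * send_word().
 */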
1518static irqreturn_t manual_interrupt(int irq, void *dev_id)
1519{
1520        unsigned long flags;
1521        int i;
1522        int found = 0;
1523        reg_sser_r_masked_intr masked;
1524
1525        for (i = 0; i < NBR_PORTS; i++) {
1526                struct sync_port *port = &ports[i];
1527
1528                if (!port->enabled || port->use_dma)
1529                        continue;
1530
1531                masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1532                /* Data received? */
1533                if (masked.rdav) {
1534                        reg_sser_rw_rec_cfg rec_cfg =
1535                                REG_RD(sser, port->regi_sser, rw_rec_cfg);
1536                        reg_sser_r_rec_data data = REG_RD(sser,
1537                                port->regi_sser, r_rec_data);
1538                        found = 1;
1539                        /* Read data */
1540                        spin_lock_irqsave(&port->lock, flags);
1541                        switch (rec_cfg.sample_size) {
1542                        case 8:
1543                                *port->writep++ = data.data & 0xff;
1544                                break;
1545                        case 12:
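                                /* A 12-bit word is stored as two bytes: the
                                 * eight most significant bits first, followed
                                 * by the low nibble. */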
1546                                *port->writep = (data.data & 0x0ff0) >> 4;
1547                                *(port->writep + 1) = data.data & 0x0f;
1548                                port->writep += 2;
1549                                break;
1550                        case 16:
1551                                *(unsigned short *)port->writep = data.data;
1552                                port->writep += 2;
1553                                break;
1554                        case 24:
                                /* Store only the three significant bytes so the
                                 * last word cannot spill past the flip buffer. */
                                *port->writep = data.data & 0xff;
                                *(port->writep + 1) = (data.data >> 8) & 0xff;
                                *(port->writep + 2) = (data.data >> 16) & 0xff;
1556                                port->writep += 3;
1557                                break;
1558                        case 32:
1559                                *(unsigned int *)port->writep = data.data;
1560                                port->writep += 4;
1561                                break;
1562                        }
1563
1564                        /* Wrap? */
1565                        if (port->writep >= port->flip + port->in_buffer_size)
1566                                port->writep = port->flip;
1567                        if (port->writep == port->readp) {
1568                                /* Receive buf overrun, discard oldest data */
1569                                port->readp++;
1570                                /* Wrap? */
1571                                if (port->readp >= port->flip +
1572                                                port->in_buffer_size)
1573                                        port->readp = port->flip;
1574                        }
1575                        spin_unlock_irqrestore(&port->lock, flags);
1576                        if (sync_data_avail(port) >= port->inbufchunk)
1577                                /* Wake up application */
1578                                wake_up_interruptible(&port->in_wait_q);
1579                }
1580
1581                /* Transmitter ready? */
1582                if (masked.trdy) {
1583                        found = 1;
1584                        /* More data to send */
1585                        if (port->out_buf_count > 0)
1586                                send_word(port);
1587                        else {
1588                                /* Transmission finished */
1589                                reg_sser_rw_intr_mask intr_mask;
1590                                intr_mask = REG_RD(sser, port->regi_sser,
1591                                        rw_intr_mask);
1592                                intr_mask.trdy = 0;
1593                                REG_WR(sser, port->regi_sser,
1594                                        rw_intr_mask, intr_mask);
1595                                /* Wake up application */
1596                                wake_up_interruptible(&port->out_wait_q);
1597                        }
1598                }
1599        }
1600        return IRQ_RETVAL(found);
1601}
1602#endif
1603
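/*
 * Illustrative user-space usage (a sketch, not part of the driver): assuming
 * SYNCSER_NAME expands to "syncser" and using the constants declared in
 * <asm/sync_serial.h>, an application would typically do something like
 *
 *        fd = open("/dev/syncser0", O_RDWR);
 *        ioctl(fd, SSP_MODE, MASTER_OUTPUT);
 *        ioctl(fd, SSP_SPEED, <one of the SSP* rate constants>);
 *        write(fd, buf, len);
 *
 * The exact ioctl names and argument values are defined by the user API
 * header and may differ; treat this only as an example.
 */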
1604static int __init etrax_sync_serial_init(void)
1605{
1606#if 1
1607        /* This code will be removed when we move to udev for all devices. */
1608        syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
1609        if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
1610                pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
1611                return -EBUSY;
1612        }
1613#else
1614        /* Allocate dynamic major number. */
1615        if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
1616                pr_err("Failed to allocate character device region\n");
1617                return -1;
1618        }
1619#endif
1620        syncser_cdev = cdev_alloc();
1621        if (!syncser_cdev) {
1622                pr_err("Failed to allocate cdev for syncser\n");
1623                unregister_chrdev_region(syncser_first, minor_count);
1624                return -ENOMEM;
1625        }
1626        cdev_init(syncser_cdev, &syncser_fops);
1627
1628        /* Create a sysfs class for syncser */
1629        syncser_class = class_create(THIS_MODULE, "syncser_class");
1630
1631        /* Initialize Ports */
1632#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
1633        if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
1634                pr_warn("Unable to alloc pins for synchronous serial port 0\n");
1635                unregister_chrdev_region(syncser_first, minor_count);
                class_destroy(syncser_class);
1636                return -EIO;
1637        }
1638        initialize_port(0);
1639        ports[0].enabled = 1;
1640        /* Register with sysfs so udev can pick it up. */
1641        device_create(syncser_class, NULL, syncser_first, NULL,
1642                      "%s%d", SYNCSER_NAME, 0);
1643#endif
1644
1645#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
1646        if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
1647                pr_warn("Unable to alloc pins for synchronous serial port 1\n");
1648                unregister_chrdev_region(syncser_first, minor_count);
1649                class_destroy(syncser_class);
1650                return -EIO;
1651        }
1652        initialize_port(1);
1653        ports[1].enabled = 1;
1654        /* Register with sysfs so udev can pick it up. */
1655        device_create(syncser_class, NULL, syncser_first + 1, NULL,
1656                      "%s%d", SYNCSER_NAME, 1);
1657#endif
1658
1659        /* Add it to system */
1660        if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
1661                pr_err("Failed to add syncser as char device\n");
1662                device_destroy(syncser_class, syncser_first);
1663                class_destroy(syncser_class);
1664                cdev_del(syncser_cdev);
1665                unregister_chrdev_region(syncser_first, minor_count);
1666                return -ENODEV;
1667        }
1668
1670        pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
1671                SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
1672
1673        return 0;
1674}
1675
1676static void __exit etrax_sync_serial_exit(void)
1677{
1678        int i;
1679        device_destroy(syncser_class, syncser_first);
#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
        /* Port 1 got its own node in init; remove it as well. */
        device_destroy(syncser_class, syncser_first + 1);
#endif
1680        class_destroy(syncser_class);
1681
1682        if (syncser_cdev) {
1683                cdev_del(syncser_cdev);
1684                unregister_chrdev_region(syncser_first, minor_count);
1685        }
1686        for (i = 0; i < NBR_PORTS; i++) {
1687                struct sync_port *port = &ports[i];
1688                if (port->init_irqs == dma_irq_setup) {
1689                        /* Free dma irqs and dma channels. */
1690#ifdef SYNC_SER_DMA
1691                        artpec_free_dma(port->dma_in_nbr);
1692                        artpec_free_dma(port->dma_out_nbr);
1693                        free_irq(port->dma_out_intr_vect, port);
1694                        free_irq(port->dma_in_intr_vect, port);
1695#endif
1696                } else if (port->init_irqs == manual_irq_setup) {
1697                        /* Free manual irq. */
1698                        free_irq(port->syncser_intr_vect, port);
1699                }
1700        }
1701
1702        pr_info("ARTPEC synchronous serial port unregistered\n");
1703}
1704
1705module_init(etrax_sync_serial_init);
1706module_exit(etrax_sync_serial_exit);
1707
1708MODULE_LICENSE("GPL");
1709
1710