linux/drivers/spi/spi-pl022.c
   1/*
   2 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
   3 *
   4 * Copyright (C) 2008-2012 ST-Ericsson AB
   5 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
   6 *
   7 * Author: Linus Walleij <linus.walleij@stericsson.com>
   8 *
   9 * Initial version inspired by:
  10 *      linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
   11 * Initial adaptation to PL022 by:
  12 *      Sachin Verma <sachin.verma@st.com>
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License as published by
  16 * the Free Software Foundation; either version 2 of the License, or
  17 * (at your option) any later version.
  18 *
  19 * This program is distributed in the hope that it will be useful,
  20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  22 * GNU General Public License for more details.
  23 */
  24
  25#include <linux/init.h>
  26#include <linux/module.h>
  27#include <linux/device.h>
  28#include <linux/ioport.h>
  29#include <linux/errno.h>
  30#include <linux/interrupt.h>
  31#include <linux/spi/spi.h>
  32#include <linux/delay.h>
  33#include <linux/clk.h>
  34#include <linux/err.h>
  35#include <linux/amba/bus.h>
  36#include <linux/amba/pl022.h>
  37#include <linux/io.h>
  38#include <linux/slab.h>
  39#include <linux/dmaengine.h>
  40#include <linux/dma-mapping.h>
  41#include <linux/scatterlist.h>
  42#include <linux/pm_runtime.h>
  43#include <linux/gpio.h>
  44#include <linux/of_gpio.h>
  45#include <linux/pinctrl/consumer.h>
  46
  47/*
  48 * This macro is used to define some register default values.
   49 * reg is masked with mask, then ORed with an (again masked)
  50 * val shifted sb steps to the left.
  51 */
  52#define SSP_WRITE_BITS(reg, val, mask, sb) \
  53 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
  54
  55/*
  56 * This macro is also used to define some default values.
  57 * It will just shift val by sb steps to the left and mask
  58 * the result with mask.
  59 */
  60#define GEN_MASK_BITS(val, mask, sb) \
  61 (((val)<<(sb)) & (mask))
  62
  63#define DRIVE_TX                0
  64#define DO_NOT_DRIVE_TX         1
  65
  66#define DO_NOT_QUEUE_DMA        0
  67#define QUEUE_DMA               1
  68
  69#define RX_TRANSFER             1
  70#define TX_TRANSFER             2
  71
  72/*
  73 * Macros to access SSP Registers with their offsets
  74 */
  75#define SSP_CR0(r)      (r + 0x000)
  76#define SSP_CR1(r)      (r + 0x004)
  77#define SSP_DR(r)       (r + 0x008)
  78#define SSP_SR(r)       (r + 0x00C)
  79#define SSP_CPSR(r)     (r + 0x010)
  80#define SSP_IMSC(r)     (r + 0x014)
  81#define SSP_RIS(r)      (r + 0x018)
  82#define SSP_MIS(r)      (r + 0x01C)
  83#define SSP_ICR(r)      (r + 0x020)
  84#define SSP_DMACR(r)    (r + 0x024)
  85#define SSP_CSR(r)      (r + 0x030) /* vendor extension */
  86#define SSP_ITCR(r)     (r + 0x080)
  87#define SSP_ITIP(r)     (r + 0x084)
  88#define SSP_ITOP(r)     (r + 0x088)
  89#define SSP_TDR(r)      (r + 0x08C)
  90
  91#define SSP_PID0(r)     (r + 0xFE0)
  92#define SSP_PID1(r)     (r + 0xFE4)
  93#define SSP_PID2(r)     (r + 0xFE8)
  94#define SSP_PID3(r)     (r + 0xFEC)
  95
  96#define SSP_CID0(r)     (r + 0xFF0)
  97#define SSP_CID1(r)     (r + 0xFF4)
  98#define SSP_CID2(r)     (r + 0xFF8)
  99#define SSP_CID3(r)     (r + 0xFFC)
 100
 101/*
 102 * SSP Control Register 0  - SSP_CR0
 103 */
 104#define SSP_CR0_MASK_DSS        (0x0FUL << 0)
 105#define SSP_CR0_MASK_FRF        (0x3UL << 4)
 106#define SSP_CR0_MASK_SPO        (0x1UL << 6)
 107#define SSP_CR0_MASK_SPH        (0x1UL << 7)
 108#define SSP_CR0_MASK_SCR        (0xFFUL << 8)
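/*
 * Illustrative sketch (not part of the original driver): how the
 * GEN_MASK_BITS and SSP_WRITE_BITS helpers further up compose a CR0
 * image from these field masks. The field values chosen here are
 * arbitrary examples, not driver defaults, and this helper is
 * hypothetical.
 */
static inline u32 pl022_example_compose_cr0(void)
{
        /* Build an initial value: 8 bit data size in the DSS field */
        u32 cr0 = GEN_MASK_BITS(SSP_DATA_BITS_8, SSP_CR0_MASK_DSS, 0);

        /* Rewrite the same field in place: switch to 16 bit data size */
        SSP_WRITE_BITS(cr0, SSP_DATA_BITS_16, SSP_CR0_MASK_DSS, 0);
        /* Drop an arbitrary serial clock rate divisor into bits 15:8 */
        SSP_WRITE_BITS(cr0, 0x07, SSP_CR0_MASK_SCR, 8);

        return cr0;
}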
 109
 110/*
  111 * The ST version of this block moves some bits
 112 * in SSP_CR0 and extends it to 32 bits
 113 */
 114#define SSP_CR0_MASK_DSS_ST     (0x1FUL << 0)
 115#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
 116#define SSP_CR0_MASK_CSS_ST     (0x1FUL << 16)
 117#define SSP_CR0_MASK_FRF_ST     (0x3UL << 21)
 118
 119/*
  120 * SSP Control Register 1  - SSP_CR1
 121 */
 122#define SSP_CR1_MASK_LBM        (0x1UL << 0)
 123#define SSP_CR1_MASK_SSE        (0x1UL << 1)
 124#define SSP_CR1_MASK_MS         (0x1UL << 2)
 125#define SSP_CR1_MASK_SOD        (0x1UL << 3)
 126
 127/*
 128 * The ST version of this block adds some bits
 129 * in SSP_CR1
 130 */
 131#define SSP_CR1_MASK_RENDN_ST   (0x1UL << 4)
 132#define SSP_CR1_MASK_TENDN_ST   (0x1UL << 5)
 133#define SSP_CR1_MASK_MWAIT_ST   (0x1UL << 6)
 134#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
 135#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
 136/* This one is only in the PL023 variant */
 137#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
 138
 139/*
 140 * SSP Status Register - SSP_SR
 141 */
 142#define SSP_SR_MASK_TFE         (0x1UL << 0) /* Transmit FIFO empty */
 143#define SSP_SR_MASK_TNF         (0x1UL << 1) /* Transmit FIFO not full */
 144#define SSP_SR_MASK_RNE         (0x1UL << 2) /* Receive FIFO not empty */
 145#define SSP_SR_MASK_RFF         (0x1UL << 3) /* Receive FIFO full */
 146#define SSP_SR_MASK_BSY         (0x1UL << 4) /* Busy Flag */
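/*
 * Illustrative sketch (not part of the original driver): the register
 * offset macros further up are applied to the remapped controller base
 * and handed straight to readw()/writew(), just as the rest of this
 * file does. "base" is an assumed __iomem cookie for the SSP block.
 */
static inline bool pl022_example_tx_has_room(void __iomem *base)
{
        /* TNF set means the transmit FIFO still has room for a word */
        return !!(readw(SSP_SR(base)) & SSP_SR_MASK_TNF);
}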
 147
 148/*
 149 * SSP Clock Prescale Register  - SSP_CPSR
 150 */
 151#define SSP_CPSR_MASK_CPSDVSR   (0xFFUL << 0)
 152
 153/*
 154 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
 155 */
 156#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
 157#define SSP_IMSC_MASK_RTIM  (0x1UL << 1) /* Receive timeout Interrupt mask */
 158#define SSP_IMSC_MASK_RXIM  (0x1UL << 2) /* Receive FIFO Interrupt mask */
 159#define SSP_IMSC_MASK_TXIM  (0x1UL << 3) /* Transmit FIFO Interrupt mask */
 160
 161/*
 162 * SSP Raw Interrupt Status Register - SSP_RIS
 163 */
 164/* Receive Overrun Raw Interrupt status */
 165#define SSP_RIS_MASK_RORRIS             (0x1UL << 0)
 166/* Receive Timeout Raw Interrupt status */
 167#define SSP_RIS_MASK_RTRIS              (0x1UL << 1)
 168/* Receive FIFO Raw Interrupt status */
 169#define SSP_RIS_MASK_RXRIS              (0x1UL << 2)
 170/* Transmit FIFO Raw Interrupt status */
 171#define SSP_RIS_MASK_TXRIS              (0x1UL << 3)
 172
 173/*
 174 * SSP Masked Interrupt Status Register - SSP_MIS
 175 */
 176/* Receive Overrun Masked Interrupt status */
 177#define SSP_MIS_MASK_RORMIS             (0x1UL << 0)
 178/* Receive Timeout Masked Interrupt status */
 179#define SSP_MIS_MASK_RTMIS              (0x1UL << 1)
 180/* Receive FIFO Masked Interrupt status */
 181#define SSP_MIS_MASK_RXMIS              (0x1UL << 2)
 182/* Transmit FIFO Masked Interrupt status */
 183#define SSP_MIS_MASK_TXMIS              (0x1UL << 3)
 184
 185/*
 186 * SSP Interrupt Clear Register - SSP_ICR
 187 */
 188/* Receive Overrun Raw Clear Interrupt bit */
 189#define SSP_ICR_MASK_RORIC              (0x1UL << 0)
 190/* Receive Timeout Clear Interrupt bit */
 191#define SSP_ICR_MASK_RTIC               (0x1UL << 1)
 192
 193/*
 194 * SSP DMA Control Register - SSP_DMACR
 195 */
 196/* Receive DMA Enable bit */
 197#define SSP_DMACR_MASK_RXDMAE           (0x1UL << 0)
 198/* Transmit DMA Enable bit */
 199#define SSP_DMACR_MASK_TXDMAE           (0x1UL << 1)
 200
 201/*
 202 * SSP Chip Select Control Register - SSP_CSR
 203 * (vendor extension)
 204 */
 205#define SSP_CSR_CSVALUE_MASK            (0x1FUL << 0)
 206
 207/*
 208 * SSP Integration Test control Register - SSP_ITCR
 209 */
 210#define SSP_ITCR_MASK_ITEN              (0x1UL << 0)
 211#define SSP_ITCR_MASK_TESTFIFO          (0x1UL << 1)
 212
 213/*
 214 * SSP Integration Test Input Register - SSP_ITIP
 215 */
 216#define ITIP_MASK_SSPRXD                 (0x1UL << 0)
 217#define ITIP_MASK_SSPFSSIN               (0x1UL << 1)
 218#define ITIP_MASK_SSPCLKIN               (0x1UL << 2)
 219#define ITIP_MASK_RXDMAC                 (0x1UL << 3)
 220#define ITIP_MASK_TXDMAC                 (0x1UL << 4)
 221#define ITIP_MASK_SSPTXDIN               (0x1UL << 5)
 222
 223/*
 224 * SSP Integration Test output Register - SSP_ITOP
 225 */
 226#define ITOP_MASK_SSPTXD                 (0x1UL << 0)
 227#define ITOP_MASK_SSPFSSOUT              (0x1UL << 1)
 228#define ITOP_MASK_SSPCLKOUT              (0x1UL << 2)
 229#define ITOP_MASK_SSPOEn                 (0x1UL << 3)
 230#define ITOP_MASK_SSPCTLOEn              (0x1UL << 4)
 231#define ITOP_MASK_RORINTR                (0x1UL << 5)
 232#define ITOP_MASK_RTINTR                 (0x1UL << 6)
 233#define ITOP_MASK_RXINTR                 (0x1UL << 7)
 234#define ITOP_MASK_TXINTR                 (0x1UL << 8)
 235#define ITOP_MASK_INTR                   (0x1UL << 9)
 236#define ITOP_MASK_RXDMABREQ              (0x1UL << 10)
 237#define ITOP_MASK_RXDMASREQ              (0x1UL << 11)
 238#define ITOP_MASK_TXDMABREQ              (0x1UL << 12)
 239#define ITOP_MASK_TXDMASREQ              (0x1UL << 13)
 240
 241/*
 242 * SSP Test Data Register - SSP_TDR
 243 */
 244#define TDR_MASK_TESTDATA               (0xFFFFFFFF)
 245
 246/*
 247 * Message State
 248 * we use the spi_message.state (void *) pointer to
 249 * hold a single state value, that's why all this
 250 * (void *) casting is done here.
 251 */
 252#define STATE_START                     ((void *) 0)
 253#define STATE_RUNNING                   ((void *) 1)
 254#define STATE_DONE                      ((void *) 2)
 255#define STATE_ERROR                     ((void *) -1)
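/*
 * Illustrative sketch (not part of the original driver): these state
 * words are simply stored in and compared against spi_message.state,
 * which is why they are cast to (void *) above.
 */
static inline bool pl022_example_msg_failed(struct spi_message *msg)
{
        /* Typical life cycle is STATE_START -> STATE_RUNNING -> STATE_DONE */
        return msg->state == STATE_ERROR;
}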
 256
 257/*
 258 * SSP State - Whether Enabled or Disabled
 259 */
 260#define SSP_DISABLED                    (0)
 261#define SSP_ENABLED                     (1)
 262
 263/*
 264 * SSP DMA State - Whether DMA Enabled or Disabled
 265 */
 266#define SSP_DMA_DISABLED                (0)
 267#define SSP_DMA_ENABLED                 (1)
 268
 269/*
 270 * SSP Clock Defaults
 271 */
 272#define SSP_DEFAULT_CLKRATE 0x2
 273#define SSP_DEFAULT_PRESCALE 0x40
 274
 275/*
 276 * SSP Clock Parameter ranges
 277 */
 278#define CPSDVR_MIN 0x02
 279#define CPSDVR_MAX 0xFE
 280#define SCR_MIN 0x00
 281#define SCR_MAX 0xFF
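/*
 * Worked example (not part of the original driver): how the parameter
 * ranges above bound the SSP output rate. The PrimeCell derives the bit
 * rate as spiclk / (cpsdvsr * (1 + scr)), with cpsdvsr an even value in
 * [CPSDVR_MIN, CPSDVR_MAX] and scr in [SCR_MIN, SCR_MAX]. Assuming a
 * 24 MHz spiclk, cpsdvsr = 2 with scr = 0 gives the 12 MHz maximum and
 * cpsdvsr = 254 with scr = 255 gives the ~369 Hz minimum.
 */
static inline u32 pl022_example_effective_rate(u32 spiclk_hz, u8 cpsdvsr, u8 scr)
{
        return spiclk_hz / (cpsdvsr * (1 + scr));
}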
 282
 283/*
 284 * SSP Interrupt related Macros
 285 */
 286#define DEFAULT_SSP_REG_IMSC  0x0UL
 287#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
 288#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
 289
 290#define CLEAR_ALL_INTERRUPTS  0x3
 291
 292#define SPI_POLLING_TIMEOUT 1000
 293
 294/*
 295 * The type of reading going on on this chip
 296 */
 297enum ssp_reading {
 298        READING_NULL,
 299        READING_U8,
 300        READING_U16,
 301        READING_U32
 302};
 303
  304/*
 305 * The type of writing going on on this chip
 306 */
 307enum ssp_writing {
 308        WRITING_NULL,
 309        WRITING_U8,
 310        WRITING_U16,
 311        WRITING_U32
 312};
 313
 314/**
 315 * struct vendor_data - vendor-specific config parameters
  316 * for PL022 derivatives
 317 * @fifodepth: depth of FIFOs (both)
 318 * @max_bpw: maximum number of bits per word
  319 * @unidir: supports unidirectional transfers
 320 * @extended_cr: 32 bit wide control register 0 with extra
 321 * features and extra features in CR1 as found in the ST variants
 322 * @pl023: supports a subset of the ST extensions called "PL023"
 323 * @internal_cs_ctrl: supports chip select control register
 324 */
 325struct vendor_data {
 326        int fifodepth;
 327        int max_bpw;
 328        bool unidir;
 329        bool extended_cr;
 330        bool pl023;
 331        bool loopback;
 332        bool internal_cs_ctrl;
 333};
 334
 335/**
 336 * struct pl022 - This is the private SSP driver data structure
 337 * @adev: AMBA device model hookup
 338 * @vendor: vendor data for the IP block
 339 * @phybase: the physical memory where the SSP device resides
 340 * @virtbase: the virtual memory where the SSP is mapped
 341 * @clk: outgoing clock "SPICLK" for the SPI bus
 342 * @master: SPI framework hookup
 343 * @master_info: controller-specific data from machine setup
 351 * @pump_transfers: Tasklet used in Interrupt Transfer mode
 352 * @cur_msg: Pointer to current spi_message being processed
 353 * @cur_transfer: Pointer to current spi_transfer
  354 * @cur_chip: pointer to current client's chip (assigned from controller_state)
 355 * @next_msg_cs_active: the next message in the queue has been examined
 356 *  and it was found that it uses the same chip select as the previous
 357 *  message, so we left it active after the previous transfer, and it's
 358 *  active already.
 359 * @tx: current position in TX buffer to be read
 360 * @tx_end: end position in TX buffer to be read
 361 * @rx: current position in RX buffer to be written
 362 * @rx_end: end position in RX buffer to be written
 363 * @read: the type of read currently going on
 364 * @write: the type of write currently going on
 365 * @exp_fifo_level: expected FIFO level
 366 * @dma_rx_channel: optional channel for RX DMA
 367 * @dma_tx_channel: optional channel for TX DMA
  368 * @sgt_rx: scatter table for the RX transfer
  369 * @sgt_tx: scatter table for the TX transfer
 370 * @dummypage: a dummy page used for driving data on the bus with DMA
 371 * @cur_cs: current chip select (gpio)
 372 * @chipselects: list of chipselects (gpios)
 373 */
 374struct pl022 {
 375        struct amba_device              *adev;
 376        struct vendor_data              *vendor;
 377        resource_size_t                 phybase;
 378        void __iomem                    *virtbase;
 379        struct clk                      *clk;
 380        struct spi_master               *master;
 381        struct pl022_ssp_controller     *master_info;
 382        /* Message per-transfer pump */
 383        struct tasklet_struct           pump_transfers;
 384        struct spi_message              *cur_msg;
 385        struct spi_transfer             *cur_transfer;
 386        struct chip_data                *cur_chip;
 387        bool                            next_msg_cs_active;
 388        void                            *tx;
 389        void                            *tx_end;
 390        void                            *rx;
 391        void                            *rx_end;
 392        enum ssp_reading                read;
 393        enum ssp_writing                write;
 394        u32                             exp_fifo_level;
 395        enum ssp_rx_level_trig          rx_lev_trig;
 396        enum ssp_tx_level_trig          tx_lev_trig;
 397        /* DMA settings */
 398#ifdef CONFIG_DMA_ENGINE
 399        struct dma_chan                 *dma_rx_channel;
 400        struct dma_chan                 *dma_tx_channel;
 401        struct sg_table                 sgt_rx;
 402        struct sg_table                 sgt_tx;
 403        char                            *dummypage;
 404        bool                            dma_running;
 405#endif
 406        int cur_cs;
 407        int *chipselects;
 408};
 409
 410/**
 411 * struct chip_data - To maintain runtime state of SSP for each client chip
 412 * @cr0: Value of control register CR0 of SSP - on later ST variants this
 413 *       register is 32 bits wide rather than just 16
 414 * @cr1: Value of control register CR1 of SSP
 415 * @dmacr: Value of DMA control Register of SSP
 416 * @cpsr: Value of Clock prescale register
  417 * @n_bytes: how many bytes (power of 2) required for a given data width of client
 418 * @enable_dma: Whether to enable DMA or not
 419 * @read: function ptr to be used to read when doing xfer for this chip
 420 * @write: function ptr to be used to write when doing xfer for this chip
 421 * @cs_control: chip select callback provided by chip
 422 * @xfer_type: polling/interrupt/DMA
 423 *
  424 * Runtime state of the SSP controller, maintained per chip.
  425 * This is set according to the current message being served
 426 */
 427struct chip_data {
 428        u32 cr0;
 429        u16 cr1;
 430        u16 dmacr;
 431        u16 cpsr;
 432        u8 n_bytes;
 433        bool enable_dma;
 434        enum ssp_reading read;
 435        enum ssp_writing write;
 436        void (*cs_control) (u32 command);
 437        int xfer_type;
 438};
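/*
 * Illustrative sketch (assumed example, not from the original code): the
 * typical mapping from a client's word width to n_bytes above, which in
 * turn selects the READING_/WRITING_ width used when stepping through
 * the buffers. The helper name is hypothetical.
 */
static inline u8 pl022_example_n_bytes(u32 bits_per_word)
{
        /* 4..8 bit words fit in one byte, 9..16 in two, wider words in four */
        if (bits_per_word <= 8)
                return 1;
        if (bits_per_word <= 16)
                return 2;
        return 4;
}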
 439
 440/**
 441 * null_cs_control - Dummy chip select function
  442 * @command: select/deselect the chip
  443 *
  444 * If no chip select function is provided by the client this is used as a
  445 * dummy chip select
 446 */
 447static void null_cs_control(u32 command)
 448{
 449        pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
 450}
 451
 452/**
 453 * internal_cs_control - Control chip select signals via SSP_CSR.
 454 * @pl022: SSP driver private data structure
  455 * @command: select/deselect the chip
  456 *
  457 * Used on controllers with internal chip select control via SSP_CSR register
 458 * (vendor extension). Each of the 5 LSB in the register controls one chip
 459 * select signal.
 460 */
 461static void internal_cs_control(struct pl022 *pl022, u32 command)
 462{
 463        u32 tmp;
 464
 465        tmp = readw(SSP_CSR(pl022->virtbase));
 466        if (command == SSP_CHIP_SELECT)
 467                tmp &= ~BIT(pl022->cur_cs);
 468        else
 469                tmp |= BIT(pl022->cur_cs);
 470        writew(tmp, SSP_CSR(pl022->virtbase));
 471}
 472
 473static void pl022_cs_control(struct pl022 *pl022, u32 command)
 474{
 475        if (pl022->vendor->internal_cs_ctrl)
 476                internal_cs_control(pl022, command);
 477        else if (gpio_is_valid(pl022->cur_cs))
 478                gpio_set_value(pl022->cur_cs, command);
 479        else
 480                pl022->cur_chip->cs_control(command);
 481}
 482
 483/**
  484 * giveback - the current spi_message is over; schedule the next message and
  485 * call this message's completion callback. Assumes that the caller has
  486 * already set message->status; DMA and PIO irqs are blocked
 487 * @pl022: SSP driver private data structure
 488 */
 489static void giveback(struct pl022 *pl022)
 490{
 491        struct spi_transfer *last_transfer;
 492        pl022->next_msg_cs_active = false;
 493
 494        last_transfer = list_last_entry(&pl022->cur_msg->transfers,
 495                                        struct spi_transfer, transfer_list);
 496
 497        /* Delay if requested before any change in chip select */
 498        if (last_transfer->delay_usecs)
 499                /*
 500                 * FIXME: This runs in interrupt context.
 501                 * Is this really smart?
 502                 */
 503                udelay(last_transfer->delay_usecs);
 504
 505        if (!last_transfer->cs_change) {
 506                struct spi_message *next_msg;
 507
 508                /*
 509                 * cs_change was not set. We can keep the chip select
  510 * enabled if there is a message in the queue and it is
 511                 * for the same spi device.
 512                 *
 513                 * We cannot postpone this until pump_messages, because
 514                 * after calling msg->complete (below) the driver that
 515                 * sent the current message could be unloaded, which
 516                 * could invalidate the cs_control() callback...
 517                 */
 518                /* get a pointer to the next message, if any */
 519                next_msg = spi_get_next_queued_message(pl022->master);
 520
 521                /*
 522                 * see if the next and current messages point
 523                 * to the same spi device.
 524                 */
 525                if (next_msg && next_msg->spi != pl022->cur_msg->spi)
 526                        next_msg = NULL;
 527                if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
 528                        pl022_cs_control(pl022, SSP_CHIP_DESELECT);
 529                else
 530                        pl022->next_msg_cs_active = true;
 531
 532        }
 533
 534        pl022->cur_msg = NULL;
 535        pl022->cur_transfer = NULL;
 536        pl022->cur_chip = NULL;
 537
 538        /* disable the SPI/SSP operation */
 539        writew((readw(SSP_CR1(pl022->virtbase)) &
 540                (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
 541
 542        spi_finalize_current_message(pl022->master);
 543}
 544
 545/**
 546 * flush - flush the FIFO to reach a clean state
 547 * @pl022: SSP driver private data structure
 548 */
 549static int flush(struct pl022 *pl022)
 550{
 551        unsigned long limit = loops_per_jiffy << 1;
 552
 553        dev_dbg(&pl022->adev->dev, "flush\n");
 554        do {
 555                while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
 556                        readw(SSP_DR(pl022->virtbase));
 557        } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
 558
 559        pl022->exp_fifo_level = 0;
 560
 561        return limit;
 562}
 563
 564/**
 565 * restore_state - Load configuration of current chip
 566 * @pl022: SSP driver private data structure
 567 */
 568static void restore_state(struct pl022 *pl022)
 569{
 570        struct chip_data *chip = pl022->cur_chip;
 571
 572        if (pl022->vendor->extended_cr)
 573                writel(chip->cr0, SSP_CR0(pl022->virtbase));
 574        else
 575                writew(chip->cr0, SSP_CR0(pl022->virtbase));
 576        writew(chip->cr1, SSP_CR1(pl022->virtbase));
 577        writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
 578        writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
 579        writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
 580        writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
 581}
 582
 583/*
 584 * Default SSP Register Values
 585 */
 586#define DEFAULT_SSP_REG_CR0 ( \
 587        GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)    | \
 588        GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
 589        GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
 590        GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
 591        GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
 592)
 593
 594/* ST versions have slightly different bit layout */
 595#define DEFAULT_SSP_REG_CR0_ST ( \
 596        GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
 597        GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
 598        GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
 599        GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
 600        GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
 601        GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16)      | \
 602        GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
 603)
 604
 605/* The PL023 version is slightly different again */
 606#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
 607        GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
 608        GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
 609        GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
 610        GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
 611)
 612
 613#define DEFAULT_SSP_REG_CR1 ( \
 614        GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
 615        GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
 616        GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
 617        GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
 618)
 619
 620/* ST versions extend this register to use all 16 bits */
 621#define DEFAULT_SSP_REG_CR1_ST ( \
 622        DEFAULT_SSP_REG_CR1 | \
 623        GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
 624        GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
 625        GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
 626        GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
 627        GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
 628)
 629
 630/*
 631 * The PL023 variant has further differences: no loopback mode, no microwire
 632 * support, and a new clock feedback delay setting.
 633 */
 634#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
 635        GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
 636        GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
 637        GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
 638        GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
 639        GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
 640        GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
 641        GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
 642        GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
 643)
 644
 645#define DEFAULT_SSP_REG_CPSR ( \
 646        GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
 647)
 648
 649#define DEFAULT_SSP_REG_DMACR (\
 650        GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
 651        GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
 652)
 653
 654/**
 655 * load_ssp_default_config - Load default configuration for SSP
 656 * @pl022: SSP driver private data structure
 657 */
 658static void load_ssp_default_config(struct pl022 *pl022)
 659{
 660        if (pl022->vendor->pl023) {
 661                writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
 662                writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
 663        } else if (pl022->vendor->extended_cr) {
 664                writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
 665                writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
 666        } else {
 667                writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
 668                writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
 669        }
 670        writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
 671        writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
 672        writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
 673        writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
 674}
 675
  676/*
 677 * This will write to TX and read from RX according to the parameters
 678 * set in pl022.
 679 */
 680static void readwriter(struct pl022 *pl022)
 681{
 682
 683        /*
 684         * The FIFO depth is different between primecell variants.
 685         * I believe filling in too much in the FIFO might cause
   686         * errors in 8-bit wide transfers on ARM variants (just an 8 word
   687         * FIFO, meaning only 8x8 = 64 bits in the FIFO) at least.
 688         *
 689         * To prevent this issue, the TX FIFO is only filled to the
 690         * unused RX FIFO fill length, regardless of what the TX
 691         * FIFO status flag indicates.
 692         */
 693        dev_dbg(&pl022->adev->dev,
 694                "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
 695                __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
 696
 697        /* Read as much as you can */
 698        while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
 699               && (pl022->rx < pl022->rx_end)) {
 700                switch (pl022->read) {
 701                case READING_NULL:
 702                        readw(SSP_DR(pl022->virtbase));
 703                        break;
 704                case READING_U8:
 705                        *(u8 *) (pl022->rx) =
 706                                readw(SSP_DR(pl022->virtbase)) & 0xFFU;
 707                        break;
 708                case READING_U16:
 709                        *(u16 *) (pl022->rx) =
 710                                (u16) readw(SSP_DR(pl022->virtbase));
 711                        break;
 712                case READING_U32:
 713                        *(u32 *) (pl022->rx) =
 714                                readl(SSP_DR(pl022->virtbase));
 715                        break;
 716                }
 717                pl022->rx += (pl022->cur_chip->n_bytes);
 718                pl022->exp_fifo_level--;
 719        }
 720        /*
 721         * Write as much as possible up to the RX FIFO size
 722         */
 723        while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
 724               && (pl022->tx < pl022->tx_end)) {
 725                switch (pl022->write) {
 726                case WRITING_NULL:
 727                        writew(0x0, SSP_DR(pl022->virtbase));
 728                        break;
 729                case WRITING_U8:
 730                        writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
 731                        break;
 732                case WRITING_U16:
 733                        writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
 734                        break;
 735                case WRITING_U32:
 736                        writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
 737                        break;
 738                }
 739                pl022->tx += (pl022->cur_chip->n_bytes);
 740                pl022->exp_fifo_level++;
 741                /*
 742                 * This inner reader takes care of things appearing in the RX
 743                 * FIFO as we're transmitting. This will happen a lot since the
 744                 * clock starts running when you put things into the TX FIFO,
 745                 * and then things are continuously clocked into the RX FIFO.
 746                 */
 747                while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
 748                       && (pl022->rx < pl022->rx_end)) {
 749                        switch (pl022->read) {
 750                        case READING_NULL:
 751                                readw(SSP_DR(pl022->virtbase));
 752                                break;
 753                        case READING_U8:
 754                                *(u8 *) (pl022->rx) =
 755                                        readw(SSP_DR(pl022->virtbase)) & 0xFFU;
 756                                break;
 757                        case READING_U16:
 758                                *(u16 *) (pl022->rx) =
 759                                        (u16) readw(SSP_DR(pl022->virtbase));
 760                                break;
 761                        case READING_U32:
 762                                *(u32 *) (pl022->rx) =
 763                                        readl(SSP_DR(pl022->virtbase));
 764                                break;
 765                        }
 766                        pl022->rx += (pl022->cur_chip->n_bytes);
 767                        pl022->exp_fifo_level--;
 768                }
 769        }
 770        /*
 771         * When we exit here the TX FIFO should be full and the RX FIFO
 772         * should be empty
 773         */
 774}
 775
 776/**
 777 * next_transfer - Move to the Next transfer in the current spi message
 778 * @pl022: SSP driver private data structure
 779 *
  780 * This function moves through the linked list of spi transfers in the
  781 * current spi message and returns the state of the current spi
  782 * message, i.e. whether its last transfer is done (STATE_DONE) or
  783 * the next transfer is ready (STATE_RUNNING)
 784 */
 785static void *next_transfer(struct pl022 *pl022)
 786{
 787        struct spi_message *msg = pl022->cur_msg;
 788        struct spi_transfer *trans = pl022->cur_transfer;
 789
 790        /* Move to next transfer */
 791        if (trans->transfer_list.next != &msg->transfers) {
 792                pl022->cur_transfer =
 793                    list_entry(trans->transfer_list.next,
 794                               struct spi_transfer, transfer_list);
 795                return STATE_RUNNING;
 796        }
 797        return STATE_DONE;
 798}
 799
 800/*
 801 * This DMA functionality is only compiled in if we have
 802 * access to the generic DMA devices/DMA engine.
 803 */
 804#ifdef CONFIG_DMA_ENGINE
 805static void unmap_free_dma_scatter(struct pl022 *pl022)
 806{
 807        /* Unmap and free the SG tables */
 808        dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
 809                     pl022->sgt_tx.nents, DMA_TO_DEVICE);
 810        dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
 811                     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
 812        sg_free_table(&pl022->sgt_rx);
 813        sg_free_table(&pl022->sgt_tx);
 814}
 815
 816static void dma_callback(void *data)
 817{
 818        struct pl022 *pl022 = data;
 819        struct spi_message *msg = pl022->cur_msg;
 820
 821        BUG_ON(!pl022->sgt_rx.sgl);
 822
 823#ifdef VERBOSE_DEBUG
 824        /*
  825         * Optionally dump out buffers to inspect their contents; this is
  826         * useful if you want to convince yourself that the loopback
  827         * read/write contents are the same when adapting to a new
  828         * DMA engine.
 829         */
 830        {
 831                struct scatterlist *sg;
 832                unsigned int i;
 833
 834                dma_sync_sg_for_cpu(&pl022->adev->dev,
 835                                    pl022->sgt_rx.sgl,
 836                                    pl022->sgt_rx.nents,
 837                                    DMA_FROM_DEVICE);
 838
 839                for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
 840                        dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
 841                        print_hex_dump(KERN_ERR, "SPI RX: ",
 842                                       DUMP_PREFIX_OFFSET,
 843                                       16,
 844                                       1,
 845                                       sg_virt(sg),
 846                                       sg_dma_len(sg),
 847                                       1);
 848                }
 849                for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
 850                        dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
 851                        print_hex_dump(KERN_ERR, "SPI TX: ",
 852                                       DUMP_PREFIX_OFFSET,
 853                                       16,
 854                                       1,
 855                                       sg_virt(sg),
 856                                       sg_dma_len(sg),
 857                                       1);
 858                }
 859        }
 860#endif
 861
 862        unmap_free_dma_scatter(pl022);
 863
 864        /* Update total bytes transferred */
 865        msg->actual_length += pl022->cur_transfer->len;
 866        if (pl022->cur_transfer->cs_change)
 867                pl022_cs_control(pl022, SSP_CHIP_DESELECT);
 868
 869        /* Move to next transfer */
 870        msg->state = next_transfer(pl022);
 871        tasklet_schedule(&pl022->pump_transfers);
 872}
 873
 874static void setup_dma_scatter(struct pl022 *pl022,
 875                              void *buffer,
 876                              unsigned int length,
 877                              struct sg_table *sgtab)
 878{
 879        struct scatterlist *sg;
 880        int bytesleft = length;
 881        void *bufp = buffer;
 882        int mapbytes;
 883        int i;
 884
 885        if (buffer) {
 886                for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
 887                        /*
  888                  * If there are fewer bytes left than what fits
  889                  * in the rest of the current page (taking the page
  890                  * alignment offset into account) we just feed in
  891                  * those, else we stuff in as much as fits in the page.
 892                         */
 893                        if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
 894                                mapbytes = bytesleft;
 895                        else
 896                                mapbytes = PAGE_SIZE - offset_in_page(bufp);
 897                        sg_set_page(sg, virt_to_page(bufp),
 898                                    mapbytes, offset_in_page(bufp));
 899                        bufp += mapbytes;
 900                        bytesleft -= mapbytes;
 901                        dev_dbg(&pl022->adev->dev,
 902                                "set RX/TX target page @ %p, %d bytes, %d left\n",
 903                                bufp, mapbytes, bytesleft);
 904                }
 905        } else {
 906                /* Map the dummy buffer on every page */
 907                for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
 908                        if (bytesleft < PAGE_SIZE)
 909                                mapbytes = bytesleft;
 910                        else
 911                                mapbytes = PAGE_SIZE;
 912                        sg_set_page(sg, virt_to_page(pl022->dummypage),
 913                                    mapbytes, 0);
 914                        bytesleft -= mapbytes;
 915                        dev_dbg(&pl022->adev->dev,
 916                                "set RX/TX to dummy page %d bytes, %d left\n",
 917                                mapbytes, bytesleft);
 918
 919                }
 920        }
 921        BUG_ON(bytesleft);
 922}
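/*
 * Worked example (illustrative, assumed numbers): with 4 KiB pages, a
 * 10000 byte buffer that starts 768 bytes into a page is split by
 * setup_dma_scatter() above into chunks of 3328 (the rest of the first
 * page), 4096 (one full page) and 2576 bytes - one scatterlist entry
 * per touched page, never crossing a page boundary.
 */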
 923
 924/**
 925 * configure_dma - configures the channels for the next transfer
 926 * @pl022: SSP driver's private data structure
 927 */
 928static int configure_dma(struct pl022 *pl022)
 929{
 930        struct dma_slave_config rx_conf = {
 931                .src_addr = SSP_DR(pl022->phybase),
 932                .direction = DMA_DEV_TO_MEM,
 933                .device_fc = false,
 934        };
 935        struct dma_slave_config tx_conf = {
 936                .dst_addr = SSP_DR(pl022->phybase),
 937                .direction = DMA_MEM_TO_DEV,
 938                .device_fc = false,
 939        };
 940        unsigned int pages;
 941        int ret;
 942        int rx_sglen, tx_sglen;
 943        struct dma_chan *rxchan = pl022->dma_rx_channel;
 944        struct dma_chan *txchan = pl022->dma_tx_channel;
 945        struct dma_async_tx_descriptor *rxdesc;
 946        struct dma_async_tx_descriptor *txdesc;
 947
 948        /* Check that the channels are available */
 949        if (!rxchan || !txchan)
 950                return -ENODEV;
 951
 952        /*
  953         * If supplied, the DMA burst size should equal the FIFO trigger level.
  954         * Notice that the DMA engine uses a one-to-one mapping. Since we
  955         * cannot trigger on 2 elements this needs an explicit mapping rather
  956         * than a calculation.
 957         */
 958        switch (pl022->rx_lev_trig) {
 959        case SSP_RX_1_OR_MORE_ELEM:
 960                rx_conf.src_maxburst = 1;
 961                break;
 962        case SSP_RX_4_OR_MORE_ELEM:
 963                rx_conf.src_maxburst = 4;
 964                break;
 965        case SSP_RX_8_OR_MORE_ELEM:
 966                rx_conf.src_maxburst = 8;
 967                break;
 968        case SSP_RX_16_OR_MORE_ELEM:
 969                rx_conf.src_maxburst = 16;
 970                break;
 971        case SSP_RX_32_OR_MORE_ELEM:
 972                rx_conf.src_maxburst = 32;
 973                break;
 974        default:
 975                rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
 976                break;
 977        }
 978
 979        switch (pl022->tx_lev_trig) {
 980        case SSP_TX_1_OR_MORE_EMPTY_LOC:
 981                tx_conf.dst_maxburst = 1;
 982                break;
 983        case SSP_TX_4_OR_MORE_EMPTY_LOC:
 984                tx_conf.dst_maxburst = 4;
 985                break;
 986        case SSP_TX_8_OR_MORE_EMPTY_LOC:
 987                tx_conf.dst_maxburst = 8;
 988                break;
 989        case SSP_TX_16_OR_MORE_EMPTY_LOC:
 990                tx_conf.dst_maxburst = 16;
 991                break;
 992        case SSP_TX_32_OR_MORE_EMPTY_LOC:
 993                tx_conf.dst_maxburst = 32;
 994                break;
 995        default:
 996                tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
 997                break;
 998        }
 999
1000        switch (pl022->read) {
1001        case READING_NULL:
1002                /* Use the same as for writing */
1003                rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
1004                break;
1005        case READING_U8:
1006                rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1007                break;
1008        case READING_U16:
1009                rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1010                break;
1011        case READING_U32:
1012                rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1013                break;
1014        }
1015
1016        switch (pl022->write) {
1017        case WRITING_NULL:
1018                /* Use the same as for reading */
1019                tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
1020                break;
1021        case WRITING_U8:
1022                tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1023                break;
1024        case WRITING_U16:
1025                tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1026                break;
1027        case WRITING_U32:
1028                tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1029                break;
1030        }
1031
 1032        /* SPI peculiarity: we need to read and write the same width */
1033        if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
1034                rx_conf.src_addr_width = tx_conf.dst_addr_width;
1035        if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
1036                tx_conf.dst_addr_width = rx_conf.src_addr_width;
1037        BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
1038
1039        dmaengine_slave_config(rxchan, &rx_conf);
1040        dmaengine_slave_config(txchan, &tx_conf);
1041
1042        /* Create sglists for the transfers */
1043        pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
1044        dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
1045
1046        ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
1047        if (ret)
1048                goto err_alloc_rx_sg;
1049
1050        ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
1051        if (ret)
1052                goto err_alloc_tx_sg;
1053
1054        /* Fill in the scatterlists for the RX+TX buffers */
1055        setup_dma_scatter(pl022, pl022->rx,
1056                          pl022->cur_transfer->len, &pl022->sgt_rx);
1057        setup_dma_scatter(pl022, pl022->tx,
1058                          pl022->cur_transfer->len, &pl022->sgt_tx);
1059
1060        /* Map DMA buffers */
1061        rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
1062                           pl022->sgt_rx.nents, DMA_FROM_DEVICE);
1063        if (!rx_sglen)
1064                goto err_rx_sgmap;
1065
1066        tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
1067                           pl022->sgt_tx.nents, DMA_TO_DEVICE);
1068        if (!tx_sglen)
1069                goto err_tx_sgmap;
1070
1071        /* Send both scatterlists */
1072        rxdesc = dmaengine_prep_slave_sg(rxchan,
1073                                      pl022->sgt_rx.sgl,
1074                                      rx_sglen,
1075                                      DMA_DEV_TO_MEM,
1076                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1077        if (!rxdesc)
1078                goto err_rxdesc;
1079
1080        txdesc = dmaengine_prep_slave_sg(txchan,
1081                                      pl022->sgt_tx.sgl,
1082                                      tx_sglen,
1083                                      DMA_MEM_TO_DEV,
1084                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1085        if (!txdesc)
1086                goto err_txdesc;
1087
1088        /* Put the callback on the RX transfer only, that should finish last */
1089        rxdesc->callback = dma_callback;
1090        rxdesc->callback_param = pl022;
1091
1092        /* Submit and fire RX and TX with TX last so we're ready to read! */
1093        dmaengine_submit(rxdesc);
1094        dmaengine_submit(txdesc);
1095        dma_async_issue_pending(rxchan);
1096        dma_async_issue_pending(txchan);
1097        pl022->dma_running = true;
1098
1099        return 0;
1100
1101err_txdesc:
1102        dmaengine_terminate_all(txchan);
1103err_rxdesc:
1104        dmaengine_terminate_all(rxchan);
1105        dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
1106                     pl022->sgt_tx.nents, DMA_TO_DEVICE);
1107err_tx_sgmap:
1108        dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
1109                     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
1110err_rx_sgmap:
1111        sg_free_table(&pl022->sgt_tx);
1112err_alloc_tx_sg:
1113        sg_free_table(&pl022->sgt_rx);
1114err_alloc_rx_sg:
1115        return -ENOMEM;
1116}
1117
1118static int pl022_dma_probe(struct pl022 *pl022)
1119{
1120        dma_cap_mask_t mask;
1121
1122        /* Try to acquire a generic DMA engine slave channel */
1123        dma_cap_zero(mask);
1124        dma_cap_set(DMA_SLAVE, mask);
1125        /*
1126         * We need both RX and TX channels to do DMA, else do none
1127         * of them.
1128         */
1129        pl022->dma_rx_channel = dma_request_channel(mask,
1130                                            pl022->master_info->dma_filter,
1131                                            pl022->master_info->dma_rx_param);
1132        if (!pl022->dma_rx_channel) {
1133                dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
1134                goto err_no_rxchan;
1135        }
1136
1137        pl022->dma_tx_channel = dma_request_channel(mask,
1138                                            pl022->master_info->dma_filter,
1139                                            pl022->master_info->dma_tx_param);
1140        if (!pl022->dma_tx_channel) {
1141                dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
1142                goto err_no_txchan;
1143        }
1144
1145        pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1146        if (!pl022->dummypage)
1147                goto err_no_dummypage;
1148
1149        dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
1150                 dma_chan_name(pl022->dma_rx_channel),
1151                 dma_chan_name(pl022->dma_tx_channel));
1152
1153        return 0;
1154
1155err_no_dummypage:
1156        dma_release_channel(pl022->dma_tx_channel);
1157err_no_txchan:
1158        dma_release_channel(pl022->dma_rx_channel);
1159        pl022->dma_rx_channel = NULL;
1160err_no_rxchan:
1161        dev_err(&pl022->adev->dev,
1162                        "Failed to work in dma mode, work without dma!\n");
1163        return -ENODEV;
1164}
1165
1166static int pl022_dma_autoprobe(struct pl022 *pl022)
1167{
1168        struct device *dev = &pl022->adev->dev;
1169
1170        /* automatically configure DMA channels from platform, normally using DT */
1171        pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx");
1172        if (!pl022->dma_rx_channel)
1173                goto err_no_rxchan;
1174
1175        pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx");
1176        if (!pl022->dma_tx_channel)
1177                goto err_no_txchan;
1178
1179        pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1180        if (!pl022->dummypage)
1181                goto err_no_dummypage;
1182
1183        return 0;
1184
1185err_no_dummypage:
1186        dma_release_channel(pl022->dma_tx_channel);
1187        pl022->dma_tx_channel = NULL;
1188err_no_txchan:
1189        dma_release_channel(pl022->dma_rx_channel);
1190        pl022->dma_rx_channel = NULL;
1191err_no_rxchan:
1192        return -ENODEV;
1193}
 1194
1195static void terminate_dma(struct pl022 *pl022)
1196{
1197        struct dma_chan *rxchan = pl022->dma_rx_channel;
1198        struct dma_chan *txchan = pl022->dma_tx_channel;
1199
1200        dmaengine_terminate_all(rxchan);
1201        dmaengine_terminate_all(txchan);
1202        unmap_free_dma_scatter(pl022);
1203        pl022->dma_running = false;
1204}
1205
1206static void pl022_dma_remove(struct pl022 *pl022)
1207{
1208        if (pl022->dma_running)
1209                terminate_dma(pl022);
1210        if (pl022->dma_tx_channel)
1211                dma_release_channel(pl022->dma_tx_channel);
1212        if (pl022->dma_rx_channel)
1213                dma_release_channel(pl022->dma_rx_channel);
1214        kfree(pl022->dummypage);
1215}
1216
1217#else
1218static inline int configure_dma(struct pl022 *pl022)
1219{
1220        return -ENODEV;
1221}
1222
1223static inline int pl022_dma_autoprobe(struct pl022 *pl022)
1224{
1225        return 0;
1226}
1227
1228static inline int pl022_dma_probe(struct pl022 *pl022)
1229{
1230        return 0;
1231}
1232
1233static inline void pl022_dma_remove(struct pl022 *pl022)
1234{
1235}
1236#endif
1237
1238/**
1239 * pl022_interrupt_handler - Interrupt handler for SSP controller
1240 *
1241 * This function handles interrupts generated for an interrupt based transfer.
1242 * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
1243 * current message's state as STATE_ERROR and schedule the tasklet
1244 * pump_transfers which will do the postprocessing of the current message by
 1245 * calling giveback(). Otherwise it reads data from the RX FIFO until no more
 1246 * data is available, and writes data to the TX FIFO while it is not full. If we
 1247 * complete the transfer we move to the next transfer and schedule the tasklet.
1248 */
1249static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
1250{
1251        struct pl022 *pl022 = dev_id;
1252        struct spi_message *msg = pl022->cur_msg;
1253        u16 irq_status = 0;
1254        u16 flag = 0;
1255
1256        if (unlikely(!msg)) {
1257                dev_err(&pl022->adev->dev,
1258                        "bad message state in interrupt handler");
1259                /* Never fail */
1260                return IRQ_HANDLED;
1261        }
1262
1263        /* Read the Interrupt Status Register */
1264        irq_status = readw(SSP_MIS(pl022->virtbase));
1265
1266        if (unlikely(!irq_status))
1267                return IRQ_NONE;
1268
1269        /*
 1270         * This handles the FIFO interrupts; the timeout
 1271         * interrupts are flatly ignored, as they cannot be
 1272         * trusted.
1273         */
1274        if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
1275                /*
 1276                 * Overrun interrupt - bail out since our data has been
 1277                 * corrupted
1278                 */
1279                dev_err(&pl022->adev->dev, "FIFO overrun\n");
1280                if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
1281                        dev_err(&pl022->adev->dev,
1282                                "RXFIFO is full\n");
 1283                if (!(readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF))
1284                        dev_err(&pl022->adev->dev,
1285                                "TXFIFO is full\n");
1286
1287                /*
1288                 * Disable and clear interrupts, disable SSP,
1289                 * mark message with bad status so it can be
1290                 * retried.
1291                 */
1292                writew(DISABLE_ALL_INTERRUPTS,
1293                       SSP_IMSC(pl022->virtbase));
1294                writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
1295                writew((readw(SSP_CR1(pl022->virtbase)) &
1296                        (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
1297                msg->state = STATE_ERROR;
1298
1299                /* Schedule message queue handler */
1300                tasklet_schedule(&pl022->pump_transfers);
1301                return IRQ_HANDLED;
1302        }
1303
1304        readwriter(pl022);
1305
1306        if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
1307                flag = 1;
1308                /* Disable Transmit interrupt, enable receive interrupt */
1309                writew((readw(SSP_IMSC(pl022->virtbase)) &
1310                       ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
1311                       SSP_IMSC(pl022->virtbase));
1312        }
1313
1314        /*
1315         * Since all transactions must write as much as shall be read,
1316         * we can conclude the entire transaction once RX is complete.
1317         * At this point, all TX will always be finished.
1318         */
1319        if (pl022->rx >= pl022->rx_end) {
1320                writew(DISABLE_ALL_INTERRUPTS,
1321                       SSP_IMSC(pl022->virtbase));
1322                writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
1323                if (unlikely(pl022->rx > pl022->rx_end)) {
1324                        dev_warn(&pl022->adev->dev, "read %u surplus "
1325                                 "bytes (did you request an odd "
1326                                 "number of bytes on a 16bit bus?)\n",
1327                                 (u32) (pl022->rx - pl022->rx_end));
1328                }
1329                /* Update total bytes transferred */
1330                msg->actual_length += pl022->cur_transfer->len;
1331                if (pl022->cur_transfer->cs_change)
1332                        pl022_cs_control(pl022, SSP_CHIP_DESELECT);
1333                /* Move to next transfer */
1334                msg->state = next_transfer(pl022);
1335                tasklet_schedule(&pl022->pump_transfers);
1336                return IRQ_HANDLED;
1337        }
1338
1339        return IRQ_HANDLED;
1340}
1341
 1342/*
 1343 * This sets up the pointers to memory for the next transfer to
 1344 * send out on the SPI bus.
1345 */
1346static int set_up_next_transfer(struct pl022 *pl022,
1347                                struct spi_transfer *transfer)
1348{
1349        int residue;
1350
1351        /* Sanity check the message for this bus width */
1352        residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
1353        if (unlikely(residue != 0)) {
1354                dev_err(&pl022->adev->dev,
1355                        "message of %u bytes to transmit but the current "
1356                        "chip bus has a data width of %u bytes!\n",
1357                        pl022->cur_transfer->len,
1358                        pl022->cur_chip->n_bytes);
1359                dev_err(&pl022->adev->dev, "skipping this message\n");
1360                return -EIO;
1361        }
1362        pl022->tx = (void *)transfer->tx_buf;
1363        pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
1364        pl022->rx = (void *)transfer->rx_buf;
1365        pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
1366        pl022->write =
1367            pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
1368        pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
1369        return 0;
1370}
1371
1372/**
1373 * pump_transfers - Tasklet function which schedules next transfer
1374 * when running in interrupt or DMA transfer mode.
1375 * @data: SSP driver private data structure
1376 *
1377 */
1378static void pump_transfers(unsigned long data)
1379{
1380        struct pl022 *pl022 = (struct pl022 *) data;
1381        struct spi_message *message = NULL;
1382        struct spi_transfer *transfer = NULL;
1383        struct spi_transfer *previous = NULL;
1384
1385        /* Get current state information */
1386        message = pl022->cur_msg;
1387        transfer = pl022->cur_transfer;
1388
1389        /* Handle for abort */
1390        if (message->state == STATE_ERROR) {
1391                message->status = -EIO;
1392                giveback(pl022);
1393                return;
1394        }
1395
1396        /* Handle end of message */
1397        if (message->state == STATE_DONE) {
1398                message->status = 0;
1399                giveback(pl022);
1400                return;
1401        }
1402
1403        /* Delay if requested at end of transfer before CS change */
1404        if (message->state == STATE_RUNNING) {
1405                previous = list_entry(transfer->transfer_list.prev,
1406                                        struct spi_transfer,
1407                                        transfer_list);
1408                if (previous->delay_usecs)
1409                        /*
1410                         * FIXME: This runs in interrupt context.
1411                         * Is this really smart?
1412                         */
1413                        udelay(previous->delay_usecs);
1414
1415                /* Reselect chip select only if cs_change was requested */
1416                if (previous->cs_change)
1417                        pl022_cs_control(pl022, SSP_CHIP_SELECT);
1418        } else {
1419                /* STATE_START */
1420                message->state = STATE_RUNNING;
1421        }
1422
1423        if (set_up_next_transfer(pl022, transfer)) {
1424                message->state = STATE_ERROR;
1425                message->status = -EIO;
1426                giveback(pl022);
1427                return;
1428        }
1429        /* Flush the FIFOs and let's go! */
1430        flush(pl022);
1431
1432        if (pl022->cur_chip->enable_dma) {
1433                if (configure_dma(pl022)) {
1434                        dev_dbg(&pl022->adev->dev,
1435                                "configuration of DMA failed, fall back to interrupt mode\n");
1436                        goto err_config_dma;
1437                }
1438                return;
1439        }
1440
1441err_config_dma:
1442        /* enable all interrupts except RX */
1443        writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
1444}
1445
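/**
 * do_interrupt_dma_transfer - start the current transfer in IRQ or DMA mode
 * @pl022: SSP driver private data structure
 *
 * Asserts the chip select if it is not already active and programs the
 * read/write pointers for the current transfer. It then either hands the
 * transfer to the DMA engine (falling back to interrupt mode if the DMA
 * configuration fails) or leaves the FIFOs to the interrupt handler, and
 * finally enables the SSP block with the interrupt mask matching the
 * chosen mode: everything but RX for interrupt mode, nothing when the
 * DMA controller signals completion instead.
 */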
1446static void do_interrupt_dma_transfer(struct pl022 *pl022)
1447{
1448        /*
1449         * Default is to enable all interrupts except RX -
1450         * this will be enabled once TX is complete
1451         */
1452        u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);
1453
1454        /* Enable target chip, if not already active */
1455        if (!pl022->next_msg_cs_active)
1456                pl022_cs_control(pl022, SSP_CHIP_SELECT);
1457
1458        if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
1459                /* Error path */
1460                pl022->cur_msg->state = STATE_ERROR;
1461                pl022->cur_msg->status = -EIO;
1462                giveback(pl022);
1463                return;
1464        }
1465        /* If we're using DMA, set up DMA here */
1466        if (pl022->cur_chip->enable_dma) {
1467                /* Configure DMA transfer */
1468                if (configure_dma(pl022)) {
1469                        dev_dbg(&pl022->adev->dev,
1470                                "configuration of DMA failed, fall back to interrupt mode\n");
1471                        goto err_config_dma;
1472                }
1473                /* Disable interrupts in DMA mode, IRQ from DMA controller */
1474                irqflags = DISABLE_ALL_INTERRUPTS;
1475        }
1476err_config_dma:
1477        /* Enable SSP, turn on interrupts */
1478        writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1479               SSP_CR1(pl022->virtbase));
1480        writew(irqflags, SSP_IMSC(pl022->virtbase));
1481}
1482
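/**
 * do_polling_transfer - run a whole message by busy-waiting on the FIFOs
 * @pl022: SSP driver private data structure
 *
 * Walks the transfers of the current message, handling inter-transfer
 * delays and chip select changes, and pumps each transfer through
 * readwriter() until it completes or SPI_POLLING_TIMEOUT expires, after
 * which the message is finished off with giveback().
 */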
1483static void do_polling_transfer(struct pl022 *pl022)
1484{
1485        struct spi_message *message = NULL;
1486        struct spi_transfer *transfer = NULL;
1487        struct spi_transfer *previous = NULL;
1488        struct chip_data *chip;
1489        unsigned long time, timeout;
1490
1491        chip = pl022->cur_chip;
1492        message = pl022->cur_msg;
1493
1494        while (message->state != STATE_DONE) {
1495                /* Handle for abort */
1496                if (message->state == STATE_ERROR)
1497                        break;
1498                transfer = pl022->cur_transfer;
1499
1500                /* Delay if requested at end of transfer */
1501                if (message->state == STATE_RUNNING) {
1502                        previous =
1503                            list_entry(transfer->transfer_list.prev,
1504                                       struct spi_transfer, transfer_list);
1505                        if (previous->delay_usecs)
1506                                udelay(previous->delay_usecs);
1507                        if (previous->cs_change)
1508                                pl022_cs_control(pl022, SSP_CHIP_SELECT);
1509                } else {
1510                        /* STATE_START */
1511                        message->state = STATE_RUNNING;
1512                        if (!pl022->next_msg_cs_active)
1513                                pl022_cs_control(pl022, SSP_CHIP_SELECT);
1514                }
1515
1516                /* Per-transfer configuration changes */
1517                if (set_up_next_transfer(pl022, transfer)) {
1518                        /* Error path */
1519                        message->state = STATE_ERROR;
1520                        break;
1521                }
1522                /* Flush FIFOs and enable SSP */
1523                flush(pl022);
1524                writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1525                       SSP_CR1(pl022->virtbase));
1526
1527                dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
1528
1529                timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
1530                while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
1531                        time = jiffies;
1532                        readwriter(pl022);
1533                        if (time_after(time, timeout)) {
1534                                dev_warn(&pl022->adev->dev,
1535                                "%s: timeout!\n", __func__);
1536                                message->state = STATE_ERROR;
1537                                goto out;
1538                        }
1539                        cpu_relax();
1540                }
1541
1542                /* Update total bytes transferred */
1543                message->actual_length += pl022->cur_transfer->len;
1544                if (pl022->cur_transfer->cs_change)
1545                        pl022_cs_control(pl022, SSP_CHIP_DESELECT);
1546                /* Move to next transfer */
1547                message->state = next_transfer(pl022);
1548        }
1549out:
1550        /* Handle end of message */
1551        if (message->state == STATE_DONE)
1552                message->status = 0;
1553        else
1554                message->status = -EIO;
1555
1556        giveback(pl022);
1557        return;
1558}
1559
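/**
 * pl022_transfer_one_message - transfer_one_message callback for the SPI core
 * @master: SPI master being used
 * @msg: message to be transferred
 *
 * Initializes the per-message state, restores the register configuration
 * for the addressed chip and then dispatches the message either to the
 * polling loop or to the interrupt/DMA machinery, depending on the
 * transfer type selected in pl022_setup().
 */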
1560static int pl022_transfer_one_message(struct spi_master *master,
1561                                      struct spi_message *msg)
1562{
1563        struct pl022 *pl022 = spi_master_get_devdata(master);
1564
1565        /* Initial message state */
1566        pl022->cur_msg = msg;
1567        msg->state = STATE_START;
1568
1569        pl022->cur_transfer = list_entry(msg->transfers.next,
1570                                         struct spi_transfer, transfer_list);
1571
1572        /* Setup the SPI using the per chip configuration */
1573        pl022->cur_chip = spi_get_ctldata(msg->spi);
1574        pl022->cur_cs = pl022->chipselects[msg->spi->chip_select];
1575
1576        restore_state(pl022);
1577        flush(pl022);
1578
1579        if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1580                do_polling_transfer(pl022);
1581        else
1582                do_interrupt_dma_transfer(pl022);
1583
1584        return 0;
1585}
1586
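/**
 * pl022_unprepare_transfer_hardware - called when the message queue runs empty
 * @master: SPI master being used
 *
 * Simply clears the SSE bit in SSPCR1 so the SSP block is disabled until
 * the next message comes along; the SPI core's runtime PM handling then
 * takes care of powering the block down.
 */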
1587static int pl022_unprepare_transfer_hardware(struct spi_master *master)
1588{
1589        struct pl022 *pl022 = spi_master_get_devdata(master);
1590
1591        /* nothing more to do - disable spi/ssp and power off */
1592        writew((readw(SSP_CR1(pl022->virtbase)) &
1593                (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
1594
1595        return 0;
1596}
1597
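/**
 * verify_controller_parameters - sanity check chip configuration data
 * @pl022: SSP driver private data structure
 * @chip_info: per-chip configuration supplied by the board or device tree
 *
 * Checks the requested interface, hierarchy, communication mode, FIFO
 * trigger levels and (for National Microwire) control length, wait state
 * and duplex settings against what this vendor variant of the block can
 * actually do. Returns 0 if everything is acceptable, -EINVAL otherwise.
 */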
1598static int verify_controller_parameters(struct pl022 *pl022,
1599                                struct pl022_config_chip const *chip_info)
1600{
1601        if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1602            || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1603                dev_err(&pl022->adev->dev,
1604                        "interface is configured incorrectly\n");
1605                return -EINVAL;
1606        }
1607        if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1608            (!pl022->vendor->unidir)) {
1609                dev_err(&pl022->adev->dev,
1610                        "unidirectional mode not supported in this "
1611                        "hardware version\n");
1612                return -EINVAL;
1613        }
1614        if ((chip_info->hierarchy != SSP_MASTER)
1615            && (chip_info->hierarchy != SSP_SLAVE)) {
1616                dev_err(&pl022->adev->dev,
1617                        "hierarchy is configured incorrectly\n");
1618                return -EINVAL;
1619        }
1620        if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1621            && (chip_info->com_mode != DMA_TRANSFER)
1622            && (chip_info->com_mode != POLLING_TRANSFER)) {
1623                dev_err(&pl022->adev->dev,
1624                        "Communication mode is configured incorrectly\n");
1625                return -EINVAL;
1626        }
1627        switch (chip_info->rx_lev_trig) {
1628        case SSP_RX_1_OR_MORE_ELEM:
1629        case SSP_RX_4_OR_MORE_ELEM:
1630        case SSP_RX_8_OR_MORE_ELEM:
1631                /* These are always OK, all variants can handle this */
1632                break;
1633        case SSP_RX_16_OR_MORE_ELEM:
1634                if (pl022->vendor->fifodepth < 16) {
1635                        dev_err(&pl022->adev->dev,
1636                        "RX FIFO Trigger Level is configured incorrectly\n");
1637                        return -EINVAL;
1638                }
1639                break;
1640        case SSP_RX_32_OR_MORE_ELEM:
1641                if (pl022->vendor->fifodepth < 32) {
1642                        dev_err(&pl022->adev->dev,
1643                        "RX FIFO Trigger Level is configured incorrectly\n");
1644                        return -EINVAL;
1645                }
1646                break;
1647        default:
1648                dev_err(&pl022->adev->dev,
1649                        "RX FIFO Trigger Level is configured incorrectly\n");
1650                return -EINVAL;
1651        }
1652        switch (chip_info->tx_lev_trig) {
1653        case SSP_TX_1_OR_MORE_EMPTY_LOC:
1654        case SSP_TX_4_OR_MORE_EMPTY_LOC:
1655        case SSP_TX_8_OR_MORE_EMPTY_LOC:
1656                /* These are always OK, all variants can handle this */
1657                break;
1658        case SSP_TX_16_OR_MORE_EMPTY_LOC:
1659                if (pl022->vendor->fifodepth < 16) {
1660                        dev_err(&pl022->adev->dev,
1661                        "TX FIFO Trigger Level is configured incorrectly\n");
1662                        return -EINVAL;
1663                }
1664                break;
1665        case SSP_TX_32_OR_MORE_EMPTY_LOC:
1666                if (pl022->vendor->fifodepth < 32) {
1667                        dev_err(&pl022->adev->dev,
1668                        "TX FIFO Trigger Level is configured incorrectly\n");
1669                        return -EINVAL;
1670                }
1671                break;
1672        default:
1673                dev_err(&pl022->adev->dev,
1674                        "TX FIFO Trigger Level is configured incorrectly\n");
1675                return -EINVAL;
1676        }
1677        if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1678                if ((chip_info->ctrl_len < SSP_BITS_4)
1679                    || (chip_info->ctrl_len > SSP_BITS_32)) {
1680                        dev_err(&pl022->adev->dev,
1681                                "CTRL LEN is configured incorrectly\n");
1682                        return -EINVAL;
1683                }
1684                if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1685                    && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1686                        dev_err(&pl022->adev->dev,
1687                                "Wait State is configured incorrectly\n");
1688                        return -EINVAL;
1689                }
1690                /* Half duplex is only available in the ST Micro version */
1691                if (pl022->vendor->extended_cr) {
1692                        if ((chip_info->duplex !=
1693                             SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1694                            && (chip_info->duplex !=
1695                                SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1696                                dev_err(&pl022->adev->dev,
1697                                        "Microwire duplex mode is configured incorrectly\n");
1698                                return -EINVAL;
1699                        }
1700                } else if (chip_info->duplex !=
1701                           SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
1702                        dev_err(&pl022->adev->dev,
1703                                "Microwire half duplex mode requested,"
1704                                " but this is only available in the"
1705                                " ST version of PL022\n");
1706                        return -EINVAL;
1707                }
1708        }
1709        return 0;
1710}
1711
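/*
 * The serial clock rate generated by the block is
 * SSPCLK / (CPSDVSR * (1 + SCR)), so this helper simply evaluates that
 * expression for a given pair of divisors.
 */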
1712static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
1713{
1714        return rate / (cpsdvsr * (1 + scr));
1715}
1716
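/**
 * calculate_effective_freq - find divisors for a requested SPI clock rate
 * @pl022: SSP driver private data structure
 * @freq: requested bus clock frequency in Hz
 * @clk_freq: filled in with the chosen cpsdvsr/scr pair
 *
 * Searches the even prescaler values (cpsdvsr) and serial clock rates
 * (scr) for the combination that yields the highest frequency not
 * exceeding @freq, and rejects requests below the minimum the current
 * SSP input clock can produce.
 */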
1717static int calculate_effective_freq(struct pl022 *pl022, int freq,
1718                                    struct ssp_clock_params *clk_freq)
1719{
1720        /* Let's calculate the frequency parameters */
1721        u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
1722        u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
1723                best_scr = 0, tmp, found = 0;
1724
1725        rate = clk_get_rate(pl022->clk);
1726        /* cpsdvsr = 2 & scr = 0 */
1727        max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
1728        /* cpsdvsr = 254 & scr = 255 */
1729        min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
1730
1731        if (freq > max_tclk)
1732                dev_warn(&pl022->adev->dev,
1733                        "Max speed that can be programmed is %d Hz, you requested %d\n",
1734                        max_tclk, freq);
1735
1736        if (freq < min_tclk) {
1737                dev_err(&pl022->adev->dev,
1738                        "Requested frequency: %d Hz is less than minimum possible %d Hz\n",
1739                        freq, min_tclk);
1740                return -EINVAL;
1741        }
1742
1743        /*
1744         * best_freq will give closest possible available rate (<= requested
1745         * freq) for all values of scr & cpsdvsr.
1746         */
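        /*
         * Worked example with illustrative numbers (not taken from any
         * specific board): for rate = 24000000 Hz and freq = 1000000 Hz
         * the loop below settles on cpsdvsr = 2 and scr = 11, since
         * 24000000 / (2 * (1 + 11)) = 1000000 Hz exactly.
         */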
1747        while ((cpsdvsr <= CPSDVR_MAX) && !found) {
1748                while (scr <= SCR_MAX) {
1749                        tmp = spi_rate(rate, cpsdvsr, scr);
1750
1751                        if (tmp > freq) {
1752                                /* we need lower freq */
1753                                scr++;
1754                                continue;
1755                        }
1756
1757                        /*
1758                         * If we find the exact value, mark it found and break.
1759                         * If we find a closer value, remember it and break.
1760                         */
1761                        if (tmp > best_freq) {
1762                                best_freq = tmp;
1763                                best_cpsdvsr = cpsdvsr;
1764                                best_scr = scr;
1765
1766                                if (tmp == freq)
1767                                        found = 1;
1768                        }
1769                        /*
1770                         * Increasing scr further would only give lower rates,
1771                         * which are not required
1772                         */
1773                        break;
1774                }
1775                cpsdvsr += 2;
1776                scr = SCR_MIN;
1777        }
1778
1779        WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate\n",
1780                        freq);
1781
1782        clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
1783        clk_freq->scr = (u8) (best_scr & 0xFF);
1784        dev_dbg(&pl022->adev->dev,
1785                "SSP Target Frequency is: %u, Effective Frequency is %u\n",
1786                freq, best_freq);
1787        dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
1788                clk_freq->cpsdvsr, clk_freq->scr);
1789
1790        return 0;
1791}
1792
1793/*
1794 * A piece of default chip info unless the platform
1795 * supplies it.
1796 */
1797static const struct pl022_config_chip pl022_default_chip_info = {
1798        .com_mode = POLLING_TRANSFER,
1799        .iface = SSP_INTERFACE_MOTOROLA_SPI,
1800        .hierarchy = SSP_SLAVE,
1801        .slave_tx_disable = DO_NOT_DRIVE_TX,
1802        .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
1803        .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
1804        .ctrl_len = SSP_BITS_8,
1805        .wait_state = SSP_MWIRE_WAIT_ZERO,
1806        .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
1807        .cs_control = null_cs_control,
1808};
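
/*
 * For illustration only (the values and the callback name below are made
 * up, not taken from any real board): a machine file can pass its own
 * configuration through spi_board_info.controller_data instead of
 * relying on the defaults above, for example:
 *
 *	static struct pl022_config_chip my_chip_info = {
 *		.com_mode = DMA_TRANSFER,
 *		.iface = SSP_INTERFACE_MOTOROLA_SPI,
 *		.hierarchy = SSP_MASTER,
 *		.rx_lev_trig = SSP_RX_4_OR_MORE_ELEM,
 *		.tx_lev_trig = SSP_TX_4_OR_MORE_EMPTY_LOC,
 *		.cs_control = my_board_cs_control,
 *	};
 */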
1809
1810/**
1811 * pl022_setup - setup function registered to SPI master framework
1812 * @spi: spi device which is requesting setup
1813 *
1814 * This function is registered to the SPI framework for this SPI master
1815 * controller. If this is the first time setup is called for this
1816 * device, this function will initialize the runtime state for this
1817 * chip and save it in the device structure. Otherwise it will update
1818 * the runtime info with the new chip info. Nothing is actually
1819 * written to the controller hardware here; that is not done until
1820 * the actual transfers commence.
1821 */
1822static int pl022_setup(struct spi_device *spi)
1823{
1824        struct pl022_config_chip const *chip_info;
1825        struct pl022_config_chip chip_info_dt;
1826        struct chip_data *chip;
1827        struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
1828        int status = 0;
1829        struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1830        unsigned int bits = spi->bits_per_word;
1831        u32 tmp;
1832        struct device_node *np = spi->dev.of_node;
1833
1834        if (!spi->max_speed_hz)
1835                return -EINVAL;
1836
1837        /* Get controller_state if one is supplied */
1838        chip = spi_get_ctldata(spi);
1839
1840        if (chip == NULL) {
1841                chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1842                if (!chip)
1843                        return -ENOMEM;
1844                dev_dbg(&spi->dev,
1845                        "allocated memory for controller's runtime state\n");
1846        }
1847
1848        /* Get controller data if one is supplied */
1849        chip_info = spi->controller_data;
1850
1851        if (chip_info == NULL) {
1852                if (np) {
1853                        chip_info_dt = pl022_default_chip_info;
1854
1855                        chip_info_dt.hierarchy = SSP_MASTER;
1856                        of_property_read_u32(np, "pl022,interface",
1857                                &chip_info_dt.iface);
1858                        of_property_read_u32(np, "pl022,com-mode",
1859                                &chip_info_dt.com_mode);
1860                        of_property_read_u32(np, "pl022,rx-level-trig",
1861                                &chip_info_dt.rx_lev_trig);
1862                        of_property_read_u32(np, "pl022,tx-level-trig",
1863                                &chip_info_dt.tx_lev_trig);
1864                        of_property_read_u32(np, "pl022,ctrl-len",
1865                                &chip_info_dt.ctrl_len);
1866                        of_property_read_u32(np, "pl022,wait-state",
1867                                &chip_info_dt.wait_state);
1868                        of_property_read_u32(np, "pl022,duplex",
1869                                &chip_info_dt.duplex);
1870
1871                        chip_info = &chip_info_dt;
1872                } else {
1873                        chip_info = &pl022_default_chip_info;
1874                        /* spi_board_info.controller_data is not supplied */
1875                        dev_dbg(&spi->dev,
1876                                "using default controller_data settings\n");
1877                }
1878        } else
1879                dev_dbg(&spi->dev,
1880                        "using user supplied controller_data settings\n");
1881
1882        /*
1883         * We can override with custom divisors, else we use the board
1884         * frequency setting
1885         */
1886        if ((0 == chip_info->clk_freq.cpsdvsr)
1887            && (0 == chip_info->clk_freq.scr)) {
1888                status = calculate_effective_freq(pl022,
1889                                                  spi->max_speed_hz,
1890                                                  &clk_freq);
1891                if (status < 0)
1892                        goto err_config_params;
1893        } else {
1894                memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
1895                if ((clk_freq.cpsdvsr % 2) != 0)
1896                        clk_freq.cpsdvsr =
1897                                clk_freq.cpsdvsr - 1;
1898        }
1899        if ((clk_freq.cpsdvsr < CPSDVR_MIN)
1900            || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
1901                status = -EINVAL;
1902                dev_err(&spi->dev,
1903                        "cpsdvsr is configured incorrectly\n");
1904                goto err_config_params;
1905        }
1906
1907        status = verify_controller_parameters(pl022, chip_info);
1908        if (status) {
1909                dev_err(&spi->dev, "controller data is incorrect\n");
1910                goto err_config_params;
1911        }
1912
1913        pl022->rx_lev_trig = chip_info->rx_lev_trig;
1914        pl022->tx_lev_trig = chip_info->tx_lev_trig;
1915
1916        /* Now set controller state based on controller data */
1917        chip->xfer_type = chip_info->com_mode;
1918        if (!chip_info->cs_control) {
1919                chip->cs_control = null_cs_control;
1920                if (!gpio_is_valid(pl022->chipselects[spi->chip_select]))
1921                        dev_warn(&spi->dev,
1922                                 "invalid chip select\n");
1923        } else
1924                chip->cs_control = chip_info->cs_control;
1925
1926        /* Check bits per word with vendor specific range */
1927        if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
1928                status = -ENOTSUPP;
1929                dev_err(&spi->dev, "illegal data size for this controller!\n");
1930                dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
1931                                pl022->vendor->max_bpw);
1932                goto err_config_params;
1933        } else if (bits <= 8) {
1934                dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n");
1935                chip->n_bytes = 1;
1936                chip->read = READING_U8;
1937                chip->write = WRITING_U8;
1938        } else if (bits <= 16) {
1939                dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
1940                chip->n_bytes = 2;
1941                chip->read = READING_U16;
1942                chip->write = WRITING_U16;
1943        } else {
1944                dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1945                chip->n_bytes = 4;
1946                chip->read = READING_U32;
1947                chip->write = WRITING_U32;
1948        }
1949
1950        /* Now Initialize all register settings required for this chip */
1951        chip->cr0 = 0;
1952        chip->cr1 = 0;
1953        chip->dmacr = 0;
1954        chip->cpsr = 0;
1955        if ((chip_info->com_mode == DMA_TRANSFER)
1956            && ((pl022->master_info)->enable_dma)) {
1957                chip->enable_dma = true;
1958                dev_dbg(&spi->dev, "DMA mode set in controller state\n");
1959                SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1960                               SSP_DMACR_MASK_RXDMAE, 0);
1961                SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1962                               SSP_DMACR_MASK_TXDMAE, 1);
1963        } else {
1964                chip->enable_dma = false;
1965                dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
1966                SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1967                               SSP_DMACR_MASK_RXDMAE, 0);
1968                SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1969                               SSP_DMACR_MASK_TXDMAE, 1);
1970        }
1971
1972        chip->cpsr = clk_freq.cpsdvsr;
1973
1974        /* Special setup for the ST micro extended control registers */
1975        if (pl022->vendor->extended_cr) {
1976                u32 etx;
1977
1978                if (pl022->vendor->pl023) {
1979                        /* These bits are only in the PL023 */
1980                        SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
1981                                       SSP_CR1_MASK_FBCLKDEL_ST, 13);
1982                } else {
1983                        /* These bits are in the PL022 but not PL023 */
1984                        SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
1985                                       SSP_CR0_MASK_HALFDUP_ST, 5);
1986                        SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
1987                                       SSP_CR0_MASK_CSS_ST, 16);
1988                        SSP_WRITE_BITS(chip->cr0, chip_info->iface,
1989                                       SSP_CR0_MASK_FRF_ST, 21);
1990                        SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
1991                                       SSP_CR1_MASK_MWAIT_ST, 6);
1992                }
1993                SSP_WRITE_BITS(chip->cr0, bits - 1,
1994                               SSP_CR0_MASK_DSS_ST, 0);
1995
1996                if (spi->mode & SPI_LSB_FIRST) {
1997                        tmp = SSP_RX_LSB;
1998                        etx = SSP_TX_LSB;
1999                } else {
2000                        tmp = SSP_RX_MSB;
2001                        etx = SSP_TX_MSB;
2002                }
2003                SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
2004                SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
2005                SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
2006                               SSP_CR1_MASK_RXIFLSEL_ST, 7);
2007                SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
2008                               SSP_CR1_MASK_TXIFLSEL_ST, 10);
2009        } else {
2010                SSP_WRITE_BITS(chip->cr0, bits - 1,
2011                               SSP_CR0_MASK_DSS, 0);
2012                SSP_WRITE_BITS(chip->cr0, chip_info->iface,
2013                               SSP_CR0_MASK_FRF, 4);
2014        }
2015
2016        /* Stuff that is common for all versions */
2017        if (spi->mode & SPI_CPOL)
2018                tmp = SSP_CLK_POL_IDLE_HIGH;
2019        else
2020                tmp = SSP_CLK_POL_IDLE_LOW;
2021        SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);
2022
2023        if (spi->mode & SPI_CPHA)
2024                tmp = SSP_CLK_SECOND_EDGE;
2025        else
2026                tmp = SSP_CLK_FIRST_EDGE;
2027        SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);
2028
2029        SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
2030        /* Loopback is available on all versions except PL023 */
2031        if (pl022->vendor->loopback) {
2032                if (spi->mode & SPI_LOOP)
2033                        tmp = LOOPBACK_ENABLED;
2034                else
2035                        tmp = LOOPBACK_DISABLED;
2036                SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
2037        }
2038        SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
2039        SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
2040        SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
2041                3);
2042
2043        /* Save controller_state */
2044        spi_set_ctldata(spi, chip);
2045        return status;
2046 err_config_params:
2047        spi_set_ctldata(spi, NULL);
2048        kfree(chip);
2049        return status;
2050}
2051
2052/**
2053 * pl022_cleanup - cleanup function registered to SPI master framework
2054 * @spi: spi device which is requesting cleanup
2055 *
2056 * This function is registered to the SPI framework for this SPI master
2057 * controller. It will free the runtime state of chip.
2058 */
2059static void pl022_cleanup(struct spi_device *spi)
2060{
2061        struct chip_data *chip = spi_get_ctldata(spi);
2062
2063        spi_set_ctldata(spi, NULL);
2064        kfree(chip);
2065}
2066
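/**
 * pl022_platform_data_dt_get - build platform data from the device tree
 * @dev: the AMBA device being probed
 *
 * When no legacy platform data is supplied, allocate a
 * pl022_ssp_controller and fill it from the "num-cs",
 * "pl022,autosuspend-delay" and "pl022,rt" properties of the device node.
 */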
2067static struct pl022_ssp_controller *
2068pl022_platform_data_dt_get(struct device *dev)
2069{
2070        struct device_node *np = dev->of_node;
2071        struct pl022_ssp_controller *pd;
2072        u32 tmp;
2073
2074        if (!np) {
2075                dev_err(dev, "no dt node defined\n");
2076                return NULL;
2077        }
2078
2079        pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
2080        if (!pd)
2081                return NULL;
2082
2083        pd->bus_id = -1;
2084        pd->enable_dma = 1;
2085        of_property_read_u32(np, "num-cs", &tmp);
2086        pd->num_chipselect = tmp;
2087        of_property_read_u32(np, "pl022,autosuspend-delay",
2088                             &pd->autosuspend_delay);
2089        pd->rt = of_property_read_bool(np, "pl022,rt");
2090
2091        return pd;
2092}
2093
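/**
 * pl022_probe - AMBA probe routine for the PL022
 * @adev: the AMBA device being probed
 * @id: matching entry from pl022_ids, carrying the vendor_data
 *
 * Allocates the SPI master and driver state, sets up the chip select
 * lines, maps the registers, claims the bus clock and IRQ, probes for
 * DMA channels and finally registers the master with the SPI core and
 * hands the device over to runtime PM.
 */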
2094static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
2095{
2096        struct device *dev = &adev->dev;
2097        struct pl022_ssp_controller *platform_info =
2098                        dev_get_platdata(&adev->dev);
2099        struct spi_master *master;
2100        struct pl022 *pl022 = NULL;     /* Data for this driver */
2101        struct device_node *np = adev->dev.of_node;
2102        int status = 0, i, num_cs;
2103
2104        dev_info(&adev->dev,
2105                 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
2106        if (!platform_info && IS_ENABLED(CONFIG_OF))
2107                platform_info = pl022_platform_data_dt_get(dev);
2108
2109        if (!platform_info) {
2110                dev_err(dev, "probe: no platform data defined\n");
2111                return -ENODEV;
2112        }
2113
2114        if (platform_info->num_chipselect) {
2115                num_cs = platform_info->num_chipselect;
2116        } else {
2117                dev_err(dev, "probe: no chip select defined\n");
2118                return -ENODEV;
2119        }
2120
2121        /* Allocate master with space for data */
2122        master = spi_alloc_master(dev, sizeof(struct pl022));
2123        if (master == NULL) {
2124                dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
2125                return -ENOMEM;
2126        }
2127
2128        pl022 = spi_master_get_devdata(master);
2129        pl022->master = master;
2130        pl022->master_info = platform_info;
2131        pl022->adev = adev;
2132        pl022->vendor = id->data;
2133        pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
2134                                          GFP_KERNEL);
2135        if (!pl022->chipselects) {
2136                status = -ENOMEM;
2137                goto err_no_mem;
2138        }
2139
2140        /*
2141         * Bus number which has been assigned to this SSP controller
2142         * on this board
2143         */
2144        master->bus_num = platform_info->bus_id;
2145        master->num_chipselect = num_cs;
2146        master->cleanup = pl022_cleanup;
2147        master->setup = pl022_setup;
2148        master->auto_runtime_pm = true;
2149        master->transfer_one_message = pl022_transfer_one_message;
2150        master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
2151        master->rt = platform_info->rt;
2152        master->dev.of_node = dev->of_node;
2153
2154        if (platform_info->num_chipselect && platform_info->chipselects) {
2155                for (i = 0; i < num_cs; i++)
2156                        pl022->chipselects[i] = platform_info->chipselects[i];
2157        } else if (pl022->vendor->internal_cs_ctrl) {
2158                for (i = 0; i < num_cs; i++)
2159                        pl022->chipselects[i] = i;
2160        } else if (IS_ENABLED(CONFIG_OF)) {
2161                for (i = 0; i < num_cs; i++) {
2162                        int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
2163
2164                        if (cs_gpio == -EPROBE_DEFER) {
2165                                status = -EPROBE_DEFER;
2166                                goto err_no_gpio;
2167                        }
2168
2169                        pl022->chipselects[i] = cs_gpio;
2170
2171                        if (gpio_is_valid(cs_gpio)) {
2172                                if (devm_gpio_request(dev, cs_gpio, "ssp-pl022"))
2173                                        dev_err(&adev->dev,
2174                                                "could not request %d gpio\n",
2175                                                cs_gpio);
2176                                else if (gpio_direction_output(cs_gpio, 1))
2177                                        dev_err(&adev->dev,
2178                                                "could not set gpio %d as output\n",
2179                                                cs_gpio);
2180                        }
2181                }
2182        }
2183
2184        /*
2185         * Supports mode 0-3, loopback, and active low CS. Transfers are
2186         * always MS bit first on the original pl022.
2187         */
2188        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
2189        if (pl022->vendor->extended_cr)
2190                master->mode_bits |= SPI_LSB_FIRST;
2191
2192        dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
2193
2194        status = amba_request_regions(adev, NULL);
2195        if (status)
2196                goto err_no_ioregion;
2197
2198        pl022->phybase = adev->res.start;
2199        pl022->virtbase = devm_ioremap(dev, adev->res.start,
2200                                       resource_size(&adev->res));
2201        if (pl022->virtbase == NULL) {
2202                status = -ENOMEM;
2203                goto err_no_ioremap;
2204        }
2205        dev_info(&adev->dev, "mapped registers from %pa to %p\n",
2206                &adev->res.start, pl022->virtbase);
2207
2208        pl022->clk = devm_clk_get(&adev->dev, NULL);
2209        if (IS_ERR(pl022->clk)) {
2210                status = PTR_ERR(pl022->clk);
2211                dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
2212                goto err_no_clk;
2213        }
2214
2215        status = clk_prepare_enable(pl022->clk);
2216        if (status) {
2217                dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
2218                goto err_no_clk_en;
2219        }
2220
2221        /* Initialize transfer pump */
2222        tasklet_init(&pl022->pump_transfers, pump_transfers,
2223                     (unsigned long)pl022);
2224
2225        /* Disable SSP */
2226        writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
2227               SSP_CR1(pl022->virtbase));
2228        load_ssp_default_config(pl022);
2229
2230        status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
2231                                  0, "pl022", pl022);
2232        if (status < 0) {
2233                dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
2234                goto err_no_irq;
2235        }
2236
2237        /* Get DMA channels, try autoconfiguration first */
2238        status = pl022_dma_autoprobe(pl022);
2239
2240        /* If that failed, use channels from platform_info */
2241        if (status == 0)
2242                platform_info->enable_dma = 1;
2243        else if (platform_info->enable_dma) {
2244                status = pl022_dma_probe(pl022);
2245                if (status != 0)
2246                        platform_info->enable_dma = 0;
2247        }
2248
2249        /* Register with the SPI framework */
2250        amba_set_drvdata(adev, pl022);
2251        status = devm_spi_register_master(&adev->dev, master);
2252        if (status != 0) {
2253                dev_err(&adev->dev,
2254                        "probe - problem registering spi master\n");
2255                goto err_spi_register;
2256        }
2257        dev_dbg(dev, "probe succeeded\n");
2258
2259        /* let runtime PM put the device into suspend */
2260        if (platform_info->autosuspend_delay > 0) {
2261                dev_info(&adev->dev,
2262                        "will use autosuspend for runtime pm, delay %dms\n",
2263                        platform_info->autosuspend_delay);
2264                pm_runtime_set_autosuspend_delay(dev,
2265                        platform_info->autosuspend_delay);
2266                pm_runtime_use_autosuspend(dev);
2267        }
2268        pm_runtime_put(dev);
2269
2270        return 0;
2271
2272 err_spi_register:
2273        if (platform_info->enable_dma)
2274                pl022_dma_remove(pl022);
2275 err_no_irq:
2276        clk_disable_unprepare(pl022->clk);
2277 err_no_clk_en:
2278 err_no_clk:
2279 err_no_ioremap:
2280        amba_release_regions(adev);
2281 err_no_ioregion:
2282 err_no_gpio:
2283 err_no_mem:
2284        spi_master_put(master);
2285        return status;
2286}
2287
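/**
 * pl022_remove - AMBA remove routine, undoes what pl022_probe() set up
 * @adev: the AMBA device being removed
 */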
2288static int
2289pl022_remove(struct amba_device *adev)
2290{
2291        struct pl022 *pl022 = amba_get_drvdata(adev);
2292
2293        if (!pl022)
2294                return 0;
2295
2296        /*
2297         * undo pm_runtime_put() in probe.  I assume that we're not
2298         * accessing the primecell here.
2299         */
2300        pm_runtime_get_noresume(&adev->dev);
2301
2302        load_ssp_default_config(pl022);
2303        if (pl022->master_info->enable_dma)
2304                pl022_dma_remove(pl022);
2305
2306        clk_disable_unprepare(pl022->clk);
2307        amba_release_regions(adev);
2308        tasklet_disable(&pl022->pump_transfers);
2309        return 0;
2310}
2311
2312#ifdef CONFIG_PM_SLEEP
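/*
 * System sleep: quiesce the SPI core queue, force the device into runtime
 * suspend and move the pins to their sleep state; resume does the reverse.
 */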
2313static int pl022_suspend(struct device *dev)
2314{
2315        struct pl022 *pl022 = dev_get_drvdata(dev);
2316        int ret;
2317
2318        ret = spi_master_suspend(pl022->master);
2319        if (ret) {
2320                dev_warn(dev, "cannot suspend master\n");
2321                return ret;
2322        }
2323
2324        ret = pm_runtime_force_suspend(dev);
2325        if (ret) {
2326                spi_master_resume(pl022->master);
2327                return ret;
2328        }
2329
2330        pinctrl_pm_select_sleep_state(dev);
2331
2332        dev_dbg(dev, "suspended\n");
2333        return 0;
2334}
2335
2336static int pl022_resume(struct device *dev)
2337{
2338        struct pl022 *pl022 = dev_get_drvdata(dev);
2339        int ret;
2340
2341        ret = pm_runtime_force_resume(dev);
2342        if (ret)
2343                dev_err(dev, "problem resuming\n");
2344
2345        /* Start the queue running */
2346        ret = spi_master_resume(pl022->master);
2347        if (ret)
2348                dev_err(dev, "problem starting queue (%d)\n", ret);
2349        else
2350                dev_dbg(dev, "resumed\n");
2351
2352        return ret;
2353}
2354#endif
2355
2356#ifdef CONFIG_PM
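/*
 * Runtime PM: gate the SSP bus clock and switch the pins between their
 * default and idle states while the device is unused.
 */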
2357static int pl022_runtime_suspend(struct device *dev)
2358{
2359        struct pl022 *pl022 = dev_get_drvdata(dev);
2360
2361        clk_disable_unprepare(pl022->clk);
2362        pinctrl_pm_select_idle_state(dev);
2363
2364        return 0;
2365}
2366
2367static int pl022_runtime_resume(struct device *dev)
2368{
2369        struct pl022 *pl022 = dev_get_drvdata(dev);
2370
2371        pinctrl_pm_select_default_state(dev);
2372        clk_prepare_enable(pl022->clk);
2373
2374        return 0;
2375}
2376#endif
2377
2378static const struct dev_pm_ops pl022_dev_pm_ops = {
2379        SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
2380        SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
2381};
2382
2383static struct vendor_data vendor_arm = {
2384        .fifodepth = 8,
2385        .max_bpw = 16,
2386        .unidir = false,
2387        .extended_cr = false,
2388        .pl023 = false,
2389        .loopback = true,
2390        .internal_cs_ctrl = false,
2391};
2392
2393static struct vendor_data vendor_st = {
2394        .fifodepth = 32,
2395        .max_bpw = 32,
2396        .unidir = false,
2397        .extended_cr = true,
2398        .pl023 = false,
2399        .loopback = true,
2400        .internal_cs_ctrl = false,
2401};
2402
2403static struct vendor_data vendor_st_pl023 = {
2404        .fifodepth = 32,
2405        .max_bpw = 32,
2406        .unidir = false,
2407        .extended_cr = true,
2408        .pl023 = true,
2409        .loopback = false,
2410        .internal_cs_ctrl = false,
2411};
2412
2413static struct vendor_data vendor_lsi = {
2414        .fifodepth = 8,
2415        .max_bpw = 16,
2416        .unidir = false,
2417        .extended_cr = false,
2418        .pl023 = false,
2419        .loopback = true,
2420        .internal_cs_ctrl = true,
2421};
2422
2423static struct amba_id pl022_ids[] = {
2424        {
2425                /*
2426                 * ARM PL022 variant, this has a 16bit wide
2427                 * and 8 locations deep TX/RX FIFO
2428                 */
2429                .id     = 0x00041022,
2430                .mask   = 0x000fffff,
2431                .data   = &vendor_arm,
2432        },
2433        {
2434                /*
2435                 * ST Micro derivative, this has 32bit wide
2436                 * and 32 locations deep TX/RX FIFO
2437                 */
2438                .id     = 0x01080022,
2439                .mask   = 0xffffffff,
2440                .data   = &vendor_st,
2441        },
2442        {
2443                /*
2444                 * ST-Ericsson derivative "PL023" (this is not
2445                 * an official ARM number), this is a PL022 SSP block
2446                 * stripped to SPI mode only, it has 32bit wide
2447                 * and 32 locations deep TX/RX FIFO but no extended
2448                 * CR0/CR1 register
2449                 */
2450                .id     = 0x00080023,
2451                .mask   = 0xffffffff,
2452                .data   = &vendor_st_pl023,
2453        },
2454        {
2455                /*
2456                 * PL022 variant that has a chip select control register which
2457                 * allows control of 5 output signals nCS[0:4].
2458                 */
2459                .id     = 0x000b6022,
2460                .mask   = 0x000fffff,
2461                .data   = &vendor_lsi,
2462        },
2463        { 0, 0 },
2464};
2465
2466MODULE_DEVICE_TABLE(amba, pl022_ids);
2467
2468static struct amba_driver pl022_driver = {
2469        .drv = {
2470                .name   = "ssp-pl022",
2471                .pm     = &pl022_dev_pm_ops,
2472        },
2473        .id_table       = pl022_ids,
2474        .probe          = pl022_probe,
2475        .remove         = pl022_remove,
2476};
2477
2478static int __init pl022_init(void)
2479{
2480        return amba_driver_register(&pl022_driver);
2481}
2482subsys_initcall(pl022_init);
2483
2484static void __exit pl022_exit(void)
2485{
2486        amba_driver_unregister(&pl022_driver);
2487}
2488module_exit(pl022_exit);
2489
2490MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
2491MODULE_DESCRIPTION("PL022 SSP Controller Driver");
2492MODULE_LICENSE("GPL");
2493