linux/drivers/spi/spi-bcm2835.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for Broadcom BCM2835 SPI Controllers
   4 *
   5 * Copyright (C) 2012 Chris Boot
   6 * Copyright (C) 2013 Stephen Warren
   7 * Copyright (C) 2015 Martin Sperl
   8 *
   9 * This driver is inspired by:
  10 * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
  11 * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/completion.h>
  16#include <linux/debugfs.h>
  17#include <linux/delay.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/dmaengine.h>
  20#include <linux/err.h>
  21#include <linux/interrupt.h>
  22#include <linux/io.h>
  23#include <linux/kernel.h>
  24#include <linux/module.h>
  25#include <linux/of.h>
  26#include <linux/of_address.h>
  27#include <linux/of_device.h>
  28#include <linux/of_gpio.h>
  29#include <linux/of_irq.h>
  30#include <linux/spi/spi.h>
  31
  32/* SPI register offsets */
  33#define BCM2835_SPI_CS                  0x00
  34#define BCM2835_SPI_FIFO                0x04
  35#define BCM2835_SPI_CLK                 0x08
  36#define BCM2835_SPI_DLEN                0x0c
  37#define BCM2835_SPI_LTOH                0x10
  38#define BCM2835_SPI_DC                  0x14
  39
  40/* Bitfields in CS */
  41#define BCM2835_SPI_CS_LEN_LONG         0x02000000
  42#define BCM2835_SPI_CS_DMA_LEN          0x01000000
  43#define BCM2835_SPI_CS_CSPOL2           0x00800000
  44#define BCM2835_SPI_CS_CSPOL1           0x00400000
  45#define BCM2835_SPI_CS_CSPOL0           0x00200000
  46#define BCM2835_SPI_CS_RXF              0x00100000
  47#define BCM2835_SPI_CS_RXR              0x00080000
  48#define BCM2835_SPI_CS_TXD              0x00040000
  49#define BCM2835_SPI_CS_RXD              0x00020000
  50#define BCM2835_SPI_CS_DONE             0x00010000
  51#define BCM2835_SPI_CS_LEN              0x00002000
  52#define BCM2835_SPI_CS_REN              0x00001000
  53#define BCM2835_SPI_CS_ADCS             0x00000800
  54#define BCM2835_SPI_CS_INTR             0x00000400
  55#define BCM2835_SPI_CS_INTD             0x00000200
  56#define BCM2835_SPI_CS_DMAEN            0x00000100
  57#define BCM2835_SPI_CS_TA               0x00000080
  58#define BCM2835_SPI_CS_CSPOL            0x00000040
  59#define BCM2835_SPI_CS_CLEAR_RX         0x00000020
  60#define BCM2835_SPI_CS_CLEAR_TX         0x00000010
  61#define BCM2835_SPI_CS_CPOL             0x00000008
  62#define BCM2835_SPI_CS_CPHA             0x00000004
  63#define BCM2835_SPI_CS_CS_10            0x00000002
  64#define BCM2835_SPI_CS_CS_01            0x00000001
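     /*
      * For illustration: an interrupt-mode transfer in SPI mode 3 ends up
      * with CS programmed roughly as CPOL | CPHA (from prepare_message),
      * CS_10 | CS_01 (parking the native chip select) and INTR | INTD | TA,
      * see bcm2835_spi_prepare_message() and bcm2835_spi_transfer_one_irq().
      */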
  65
  66#define BCM2835_SPI_FIFO_SIZE           64
  67#define BCM2835_SPI_FIFO_SIZE_3_4       48
  68#define BCM2835_SPI_DMA_MIN_LENGTH      96
  69#define BCM2835_SPI_MODE_BITS   (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
  70                                | SPI_NO_CS | SPI_3WIRE)
  71
  72#define DRV_NAME        "spi-bcm2835"
  73
  74/* define polling limits */
   75static unsigned int polling_limit_us = 30;
  76module_param(polling_limit_us, uint, 0664);
  77MODULE_PARM_DESC(polling_limit_us,
  78                 "time in us to run a transfer in polling mode\n");
  79
  80/**
  81 * struct bcm2835_spi - BCM2835 SPI controller
  82 * @regs: base address of register map
  83 * @clk: core clock, divided to calculate serial clock
  84 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
  85 * @tfr: SPI transfer currently processed
  86 * @tx_buf: pointer whence next transmitted byte is read
  87 * @rx_buf: pointer where next received byte is written
  88 * @tx_len: remaining bytes to transmit
  89 * @rx_len: remaining bytes to receive
  90 * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
  91 *      length is not a multiple of 4 (to overcome hardware limitation)
  92 * @rx_prologue: bytes received without DMA if first RX sglist entry's
  93 *      length is not a multiple of 4 (to overcome hardware limitation)
  94 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
  95 * @dma_pending: whether a DMA transfer is in progress
   96 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
  97 *      unloading the module
  98 * @count_transfer_polling: count of how often polling mode is used
  99 * @count_transfer_irq: count of how often interrupt mode is used
 100 * @count_transfer_irq_after_polling: count of how often we fall back to
 101 *      interrupt mode after starting in polling mode.
 102 *      These are counted as well in @count_transfer_polling and
 103 *      @count_transfer_irq
 104 * @count_transfer_dma: count how often dma mode is used
 105 */
 106struct bcm2835_spi {
 107        void __iomem *regs;
 108        struct clk *clk;
 109        int irq;
 110        struct spi_transfer *tfr;
 111        const u8 *tx_buf;
 112        u8 *rx_buf;
 113        int tx_len;
 114        int rx_len;
 115        int tx_prologue;
 116        int rx_prologue;
 117        unsigned int tx_spillover;
 118        unsigned int dma_pending;
 119
 120        struct dentry *debugfs_dir;
 121        u64 count_transfer_polling;
 122        u64 count_transfer_irq;
 123        u64 count_transfer_irq_after_polling;
 124        u64 count_transfer_dma;
 125};
 126
 127#if defined(CONFIG_DEBUG_FS)
 128static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
 129                                   const char *dname)
 130{
 131        char name[64];
 132        struct dentry *dir;
 133
 134        /* get full name */
 135        snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
 136
 137        /* the base directory */
 138        dir = debugfs_create_dir(name, NULL);
 139        bs->debugfs_dir = dir;
 140
 141        /* the counters */
 142        debugfs_create_u64("count_transfer_polling", 0444, dir,
 143                           &bs->count_transfer_polling);
 144        debugfs_create_u64("count_transfer_irq", 0444, dir,
 145                           &bs->count_transfer_irq);
 146        debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
 147                           &bs->count_transfer_irq_after_polling);
 148        debugfs_create_u64("count_transfer_dma", 0444, dir,
 149                           &bs->count_transfer_dma);
 150}
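     /*
      * With debugfs mounted, the counters appear in a per-device directory,
      * e.g. /sys/kernel/debug/spi-bcm2835-3f204000.spi/count_transfer_dma
      * (the "3f204000.spi" device name is just an example).
      */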
 151
 152static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
 153{
 154        debugfs_remove_recursive(bs->debugfs_dir);
 155        bs->debugfs_dir = NULL;
 156}
 157#else
 158static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
 159                                   const char *dname)
 160{
 161}
 162
 163static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
 164{
 165}
 166#endif /* CONFIG_DEBUG_FS */
 167
 168static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
 169{
 170        return readl(bs->regs + reg);
 171}
 172
 173static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
 174{
 175        writel(val, bs->regs + reg);
 176}
 177
 178static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
 179{
 180        u8 byte;
 181
 182        while ((bs->rx_len) &&
 183               (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
 184                byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
 185                if (bs->rx_buf)
 186                        *bs->rx_buf++ = byte;
 187                bs->rx_len--;
 188        }
 189}
 190
 191static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
 192{
 193        u8 byte;
 194
 195        while ((bs->tx_len) &&
 196               (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
 197                byte = bs->tx_buf ? *bs->tx_buf++ : 0;
 198                bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
 199                bs->tx_len--;
 200        }
 201}
 202
 203/**
 204 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
 205 * @bs: BCM2835 SPI controller
 206 * @count: bytes to read from RX FIFO
 207 *
 208 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
 209 * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
 210 * in the CS register is set (such that a read from the FIFO register receives
 211 * 32-bit instead of just 8-bit).  Moreover @bs->rx_buf must not be %NULL.
 212 */
 213static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
 214{
 215        u32 val;
 216        int len;
 217
 218        bs->rx_len -= count;
 219
 220        while (count > 0) {
 221                val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
 222                len = min(count, 4);
 223                memcpy(bs->rx_buf, &val, len);
 224                bs->rx_buf += len;
 225                count -= 4;
 226        }
 227}
 228
 229/**
 230 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
 231 * @bs: BCM2835 SPI controller
 232 * @count: bytes to write to TX FIFO
 233 *
 234 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
 235 * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
 236 * in the CS register is set (such that a write to the FIFO register transmits
 237 * 32-bit instead of just 8-bit).
 238 */
 239static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
 240{
 241        u32 val;
 242        int len;
 243
 244        bs->tx_len -= count;
 245
 246        while (count > 0) {
 247                if (bs->tx_buf) {
 248                        len = min(count, 4);
 249                        memcpy(&val, bs->tx_buf, len);
 250                        bs->tx_buf += len;
 251                } else {
 252                        val = 0;
 253                }
 254                bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
 255                count -= 4;
 256        }
 257}
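     /*
      * For illustration, with hypothetical values: for @count = 3 and a
      * tx_buf of { 0x12, 0x34, 0x56 }, a single 32-bit FIFO write is issued
      * whose fourth byte is undefined padding; the controller shifts out
      * only three bytes because the caller programmed DLEN accordingly.
      */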
 258
 259/**
 260 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
 261 * @bs: BCM2835 SPI controller
 262 *
 263 * The caller must ensure that the RX FIFO can accommodate as many bytes
 264 * as have been written to the TX FIFO:  Transmission is halted once the
 265 * RX FIFO is full, causing this function to spin forever.
 266 */
 267static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
 268{
 269        while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
 270                cpu_relax();
 271}
 272
 273/**
 274 * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
 275 * @bs: BCM2835 SPI controller
 276 * @count: bytes available for reading in RX FIFO
 277 */
 278static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
 279{
 280        u8 val;
 281
 282        count = min(count, bs->rx_len);
 283        bs->rx_len -= count;
 284
 285        while (count) {
 286                val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
 287                if (bs->rx_buf)
 288                        *bs->rx_buf++ = val;
 289                count--;
 290        }
 291}
 292
 293/**
 294 * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
 295 * @bs: BCM2835 SPI controller
 296 * @count: bytes available for writing in TX FIFO
 297 */
 298static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
 299{
 300        u8 val;
 301
 302        count = min(count, bs->tx_len);
 303        bs->tx_len -= count;
 304
 305        while (count) {
 306                val = bs->tx_buf ? *bs->tx_buf++ : 0;
 307                bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
 308                count--;
 309        }
 310}
 311
 312static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
 313{
 314        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 315        u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 316
 317        /* Disable SPI interrupts and transfer */
 318        cs &= ~(BCM2835_SPI_CS_INTR |
 319                BCM2835_SPI_CS_INTD |
 320                BCM2835_SPI_CS_DMAEN |
 321                BCM2835_SPI_CS_TA);
 322        /* and reset RX/TX FIFOS */
 323        cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
 324
 325        /* and reset the SPI_HW */
 326        bcm2835_wr(bs, BCM2835_SPI_CS, cs);
 327        /* as well as DLEN */
 328        bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
 329}
 330
 331static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
 332{
 333        struct spi_controller *ctlr = dev_id;
 334        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 335        u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 336
 337        /*
 338         * An interrupt is signaled either if DONE is set (TX FIFO empty)
 339         * or if RXR is set (RX FIFO >= ¾ full).
 340         */
 341        if (cs & BCM2835_SPI_CS_RXF)
 342                bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
 343        else if (cs & BCM2835_SPI_CS_RXR)
 344                bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);
 345
 346        if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
 347                bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
 348
 349        /* Read as many bytes as possible from FIFO */
 350        bcm2835_rd_fifo(bs);
 351        /* Write as many bytes as possible to FIFO */
 352        bcm2835_wr_fifo(bs);
 353
 354        if (!bs->rx_len) {
 355                /* Transfer complete - reset SPI HW */
 356                bcm2835_spi_reset_hw(ctlr);
 357                /* wake up the framework */
 358                complete(&ctlr->xfer_completion);
 359        }
 360
 361        return IRQ_HANDLED;
 362}
 363
 364static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
 365                                        struct spi_device *spi,
 366                                        struct spi_transfer *tfr,
 367                                        u32 cs, bool fifo_empty)
 368{
 369        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 370
 371        /* update usage statistics */
 372        bs->count_transfer_irq++;
 373
 374        /*
 375         * Enable HW block, but with interrupts still disabled.
 376         * Otherwise the empty TX FIFO would immediately trigger an interrupt.
 377         */
 378        bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
 379
 380        /* fill TX FIFO as much as possible */
 381        if (fifo_empty)
 382                bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
 383        bcm2835_wr_fifo(bs);
 384
 385        /* enable interrupts */
 386        cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
 387        bcm2835_wr(bs, BCM2835_SPI_CS, cs);
 388
 389        /* signal that we need to wait for completion */
 390        return 1;
 391}
 392
 393/**
 394 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
 395 * @ctlr: SPI master controller
 396 * @tfr: SPI transfer
 397 * @bs: BCM2835 SPI controller
 398 * @cs: CS register
 399 *
 400 * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
 401 * Only the final write access is permitted to transmit less than 4 bytes, the
 402 * SPI controller deduces its intended size from the DLEN register.
 403 *
 404 * If a TX or RX sglist contains multiple entries, one per page, and the first
 405 * entry starts in the middle of a page, that first entry's length may not be
 406 * a multiple of 4.  Subsequent entries are fine because they span an entire
 407 * page, hence do have a length that's a multiple of 4.
 408 *
 409 * This cannot happen with kmalloc'ed buffers (which is what most clients use)
 410 * because they are contiguous in physical memory and therefore not split on
 411 * page boundaries by spi_map_buf().  But it *can* happen with vmalloc'ed
 412 * buffers.
 413 *
 414 * The DMA engine is incapable of combining sglist entries into a continuous
 415 * stream of 4 byte chunks, it treats every entry separately:  A TX entry is
  416 * rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
 417 * entry is rounded up by throwing away received bytes.
 418 *
 419 * Overcome this limitation by transferring the first few bytes without DMA:
 420 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
 421 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
 422 * The residue of 1 byte in the RX FIFO is picked up by DMA.  Together with
 423 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
 424 *
 425 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
 426 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
 427 * Caution, the additional 4 bytes spill over to the second TX sglist entry
 428 * if the length of the first is *exactly* 1.
 429 *
 430 * At most 6 bytes are written and at most 3 bytes read.  Do we know the
 431 * transfer has this many bytes?  Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
 432 *
 433 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
 434 * by the DMA engine.  Toggling the DMA Enable flag in the CS register switches
 435 * the width but also garbles the FIFO's contents.  The prologue must therefore
 436 * be transmitted in 32-bit width to ensure that the following DMA transfer can
 437 * pick up the residue in the RX FIFO in ungarbled form.
 438 */
 439static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
 440                                          struct spi_transfer *tfr,
 441                                          struct bcm2835_spi *bs,
 442                                          u32 cs)
 443{
 444        int tx_remaining;
 445
 446        bs->tfr          = tfr;
 447        bs->tx_prologue  = 0;
 448        bs->rx_prologue  = 0;
 449        bs->tx_spillover = false;
 450
 451        if (!sg_is_last(&tfr->tx_sg.sgl[0]))
 452                bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
 453
 454        if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
 455                bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
 456
 457                if (bs->rx_prologue > bs->tx_prologue) {
 458                        if (sg_is_last(&tfr->tx_sg.sgl[0])) {
 459                                bs->tx_prologue  = bs->rx_prologue;
 460                        } else {
 461                                bs->tx_prologue += 4;
 462                                bs->tx_spillover =
 463                                        !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
 464                        }
 465                }
 466        }
 467
 468        /* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
 469        if (!bs->tx_prologue)
 470                return;
 471
 472        /* Write and read RX prologue.  Adjust first entry in RX sglist. */
 473        if (bs->rx_prologue) {
 474                bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
 475                bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
 476                                                  | BCM2835_SPI_CS_DMAEN);
 477                bcm2835_wr_fifo_count(bs, bs->rx_prologue);
 478                bcm2835_wait_tx_fifo_empty(bs);
 479                bcm2835_rd_fifo_count(bs, bs->rx_prologue);
 480                bcm2835_spi_reset_hw(ctlr);
 481
 482                dma_sync_single_for_device(ctlr->dma_rx->device->dev,
 483                                           sg_dma_address(&tfr->rx_sg.sgl[0]),
 484                                           bs->rx_prologue, DMA_FROM_DEVICE);
 485
 486                sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
 487                sg_dma_len(&tfr->rx_sg.sgl[0])     -= bs->rx_prologue;
 488        }
 489
 490        /*
 491         * Write remaining TX prologue.  Adjust first entry in TX sglist.
 492         * Also adjust second entry if prologue spills over to it.
 493         */
 494        tx_remaining = bs->tx_prologue - bs->rx_prologue;
 495        if (tx_remaining) {
 496                bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
 497                bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
 498                                                  | BCM2835_SPI_CS_DMAEN);
 499                bcm2835_wr_fifo_count(bs, tx_remaining);
 500                bcm2835_wait_tx_fifo_empty(bs);
 501                bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX);
 502        }
 503
 504        if (likely(!bs->tx_spillover)) {
 505                sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
 506                sg_dma_len(&tfr->tx_sg.sgl[0])     -= bs->tx_prologue;
 507        } else {
 508                sg_dma_len(&tfr->tx_sg.sgl[0])      = 0;
 509                sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
 510                sg_dma_len(&tfr->tx_sg.sgl[1])     -= 4;
 511        }
 512}
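     /*
      * Worked example, using the 23/42 case from the comment above:
      * tx_prologue = 23 & 3 = 3 and rx_prologue = 42 & 3 = 2, so 3 bytes are
      * written and 2 bytes read through the FIFO before DMA starts, and the
      * first TX/RX sglist entries are advanced by 3 and 2 bytes respectively
      * (undone again by bcm2835_spi_undo_prologue()).
      */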
 513
 514/**
 515 * bcm2835_spi_undo_prologue() - reconstruct original sglist state
 516 * @bs: BCM2835 SPI controller
 517 *
 518 * Undo changes which were made to an SPI transfer's sglist when transmitting
 519 * the prologue.  This is necessary to ensure the same memory ranges are
 520 * unmapped that were originally mapped.
 521 */
 522static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
 523{
 524        struct spi_transfer *tfr = bs->tfr;
 525
 526        if (!bs->tx_prologue)
 527                return;
 528
 529        if (bs->rx_prologue) {
 530                sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
 531                sg_dma_len(&tfr->rx_sg.sgl[0])     += bs->rx_prologue;
 532        }
 533
 534        if (likely(!bs->tx_spillover)) {
 535                sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
 536                sg_dma_len(&tfr->tx_sg.sgl[0])     += bs->tx_prologue;
 537        } else {
 538                sg_dma_len(&tfr->tx_sg.sgl[0])      = bs->tx_prologue - 4;
 539                sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
 540                sg_dma_len(&tfr->tx_sg.sgl[1])     += 4;
 541        }
 542}
 543
 544static void bcm2835_spi_dma_done(void *data)
 545{
 546        struct spi_controller *ctlr = data;
 547        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 548
 549        /* reset fifo and HW */
 550        bcm2835_spi_reset_hw(ctlr);
 551
  552        /* and terminate tx-dma, which has no irq of its own: by the
  553         * time the rx dma completes and this callback is invoked,
  554         * the tx-dma must already have finished - we cannot get
  555         * into this situation otherwise...
  556         */
 557        if (cmpxchg(&bs->dma_pending, true, false)) {
 558                dmaengine_terminate_async(ctlr->dma_tx);
 559                bcm2835_spi_undo_prologue(bs);
 560        }
 561
  562        /* and mark as completed */
 563        complete(&ctlr->xfer_completion);
 564}
 565
 566static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
 567                                  struct spi_transfer *tfr,
 568                                  bool is_tx)
 569{
 570        struct dma_chan *chan;
 571        struct scatterlist *sgl;
 572        unsigned int nents;
 573        enum dma_transfer_direction dir;
 574        unsigned long flags;
 575
 576        struct dma_async_tx_descriptor *desc;
 577        dma_cookie_t cookie;
 578
 579        if (is_tx) {
 580                dir   = DMA_MEM_TO_DEV;
 581                chan  = ctlr->dma_tx;
 582                nents = tfr->tx_sg.nents;
 583                sgl   = tfr->tx_sg.sgl;
  584                flags = 0 /* no tx interrupt */;
 585
 586        } else {
 587                dir   = DMA_DEV_TO_MEM;
 588                chan  = ctlr->dma_rx;
 589                nents = tfr->rx_sg.nents;
 590                sgl   = tfr->rx_sg.sgl;
 591                flags = DMA_PREP_INTERRUPT;
 592        }
 593        /* prepare the channel */
 594        desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 595        if (!desc)
 596                return -EINVAL;
 597
 598        /* set callback for rx */
 599        if (!is_tx) {
 600                desc->callback = bcm2835_spi_dma_done;
 601                desc->callback_param = ctlr;
 602        }
 603
 604        /* submit it to DMA-engine */
 605        cookie = dmaengine_submit(desc);
 606
 607        return dma_submit_error(cookie);
 608}
 609
 610static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
 611                                        struct spi_device *spi,
 612                                        struct spi_transfer *tfr,
 613                                        u32 cs)
 614{
 615        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 616        int ret;
 617
 618        /* update usage statistics */
 619        bs->count_transfer_dma++;
 620
 621        /*
 622         * Transfer first few bytes without DMA if length of first TX or RX
 623         * sglist entry is not a multiple of 4 bytes (hardware limitation).
 624         */
 625        bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
 626
 627        /* setup tx-DMA */
 628        ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
 629        if (ret)
 630                goto err_reset_hw;
 631
 632        /* start TX early */
 633        dma_async_issue_pending(ctlr->dma_tx);
 634
 635        /* mark as dma pending */
 636        bs->dma_pending = 1;
 637
 638        /* set the DMA length */
 639        bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
 640
 641        /* start the HW */
 642        bcm2835_wr(bs, BCM2835_SPI_CS,
 643                   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
 644
  645        /* setup rx-DMA late - so the transfer already runs while
  646         * the mapping of the rx buffers still takes place;
  647         * this saves 10us or more.
  648         */
 649        ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
 650        if (ret) {
 651                /* need to reset on errors */
 652                dmaengine_terminate_sync(ctlr->dma_tx);
 653                bs->dma_pending = false;
 654                goto err_reset_hw;
 655        }
 656
 657        /* start rx dma late */
 658        dma_async_issue_pending(ctlr->dma_rx);
 659
 660        /* wait for wakeup in framework */
 661        return 1;
 662
 663err_reset_hw:
 664        bcm2835_spi_reset_hw(ctlr);
 665        bcm2835_spi_undo_prologue(bs);
 666        return ret;
 667}
 668
 669static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
 670                                struct spi_device *spi,
 671                                struct spi_transfer *tfr)
 672{
 673        /* we start DMA efforts only on bigger transfers */
 674        if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
 675                return false;
 676
 677        /* return OK */
 678        return true;
 679}
 680
 681static void bcm2835_dma_release(struct spi_controller *ctlr)
 682{
 683        if (ctlr->dma_tx) {
 684                dmaengine_terminate_sync(ctlr->dma_tx);
 685                dma_release_channel(ctlr->dma_tx);
 686                ctlr->dma_tx = NULL;
 687        }
 688        if (ctlr->dma_rx) {
 689                dmaengine_terminate_sync(ctlr->dma_rx);
 690                dma_release_channel(ctlr->dma_rx);
 691                ctlr->dma_rx = NULL;
 692        }
 693}
 694
 695static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
 696{
 697        struct dma_slave_config slave_config;
 698        const __be32 *addr;
 699        dma_addr_t dma_reg_base;
 700        int ret;
 701
 702        /* base address in dma-space */
 703        addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
 704        if (!addr) {
 705                dev_err(dev, "could not get DMA-register address - not using dma mode\n");
 706                goto err;
 707        }
 708        dma_reg_base = be32_to_cpup(addr);
 709
 710        /* get tx/rx dma */
 711        ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
 712        if (!ctlr->dma_tx) {
 713                dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
 714                goto err;
 715        }
 716        ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
 717        if (!ctlr->dma_rx) {
 718                dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
 719                goto err_release;
 720        }
 721
 722        /* configure DMAs */
 723        slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
 724        slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 725
 726        ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
 727        if (ret)
 728                goto err_config;
 729
 730        slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
 731        slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 732
 733        ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
 734        if (ret)
 735                goto err_config;
 736
 737        /* all went well, so set can_dma */
 738        ctlr->can_dma = bcm2835_spi_can_dma;
 739        /* need to do TX AND RX DMA, so we need dummy buffers */
 740        ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
 741
 742        return;
 743
 744err_config:
 745        dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
 746                ret);
 747err_release:
 748        bcm2835_dma_release(ctlr);
 749err:
 750        return;
 751}
 752
 753static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
 754                                         struct spi_device *spi,
 755                                         struct spi_transfer *tfr,
 756                                         u32 cs)
 757{
 758        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 759        unsigned long timeout;
 760
 761        /* update usage statistics */
 762        bs->count_transfer_polling++;
 763
 764        /* enable HW block without interrupts */
 765        bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
 766
  767        /* fill the fifo before the timeout calculation:
  768         * if we are interrupted here, the data already queued is
  769         * still being transferred by the HW in the meantime
  770         */
 771        bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
 772
 773        /* set the timeout to at least 2 jiffies */
 774        timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
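             /* For example, with HZ = 100 and the default 30 us limit,
              * HZ * polling_limit_us / 1000000 evaluates to 0, so the
              * "+ 2" is what guarantees at least one full jiffy of
              * slack before the fallback to interrupt mode.
              */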
 775
 776        /* loop until finished the transfer */
 777        while (bs->rx_len) {
 778                /* fill in tx fifo with remaining data */
 779                bcm2835_wr_fifo(bs);
 780
 781                /* read from fifo as much as possible */
 782                bcm2835_rd_fifo(bs);
 783
 784                /* if there is still data pending to read
 785                 * then check the timeout
 786                 */
 787                if (bs->rx_len && time_after(jiffies, timeout)) {
 788                        dev_dbg_ratelimited(&spi->dev,
 789                                            "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
 790                                            jiffies - timeout,
 791                                            bs->tx_len, bs->rx_len);
 792                        /* fall back to interrupt mode */
 793
 794                        /* update usage statistics */
 795                        bs->count_transfer_irq_after_polling++;
 796
 797                        return bcm2835_spi_transfer_one_irq(ctlr, spi,
 798                                                            tfr, cs, false);
 799                }
 800        }
 801
 802        /* Transfer complete - reset SPI HW */
 803        bcm2835_spi_reset_hw(ctlr);
 804        /* and return without waiting for completion */
 805        return 0;
 806}
 807
 808static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 809                                    struct spi_device *spi,
 810                                    struct spi_transfer *tfr)
 811{
 812        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 813        unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
 814        unsigned long hz_per_byte, byte_limit;
 815        u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 816
 817        /* set clock */
 818        spi_hz = tfr->speed_hz;
 819        clk_hz = clk_get_rate(bs->clk);
 820
 821        if (spi_hz >= clk_hz / 2) {
 822                cdiv = 2; /* clk_hz/2 is the fastest we can go */
 823        } else if (spi_hz) {
 824                /* CDIV must be a multiple of two */
 825                cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
 826                cdiv += (cdiv % 2);
 827
 828                if (cdiv >= 65536)
 829                        cdiv = 0; /* 0 is the slowest we can go */
 830        } else {
 831                cdiv = 0; /* 0 is the slowest we can go */
 832        }
 833        spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
 834        bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
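             /* Worked example, assuming the usual 250 MHz core clock: a
              * 10 MHz request gives cdiv = DIV_ROUND_UP(250e6, 10e6) = 25,
              * bumped to the even value 26, so the bus actually runs at
              * 250 MHz / 26, i.e. roughly 9.6 MHz.
              */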
 835
 836        /* handle all the 3-wire mode */
 837        if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
 838            tfr->rx_buf != ctlr->dummy_rx)
 839                cs |= BCM2835_SPI_CS_REN;
 840        else
 841                cs &= ~BCM2835_SPI_CS_REN;
 842
 843        /*
 844         * The driver always uses software-controlled GPIO Chip Select.
 845         * Set the hardware-controlled native Chip Select to an invalid
 846         * value to prevent it from interfering.
 847         */
 848        cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
 849
 850        /* set transmit buffers and length */
 851        bs->tx_buf = tfr->tx_buf;
 852        bs->rx_buf = tfr->rx_buf;
 853        bs->tx_len = tfr->len;
 854        bs->rx_len = tfr->len;
 855
 856        /* Calculate the estimated time in us the transfer runs.  Note that
  857         * there is 1 idle clock cycle after each byte getting transferred
 858         * so we have 9 cycles/byte.  This is used to find the number of Hz
 859         * per byte per polling limit.  E.g., we can transfer 1 byte in 30 us
 860         * per 300,000 Hz of bus clock.
 861         */
 862        hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
 863        byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
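             /* Continuing the example above: at roughly 9.6 MHz and the
              * default 30 us polling limit, hz_per_byte = 300000 and
              * byte_limit works out to about 32, so transfers shorter
              * than ~32 bytes are polled.
              */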
 864
 865        /* run in polling mode for short transfers */
 866        if (tfr->len < byte_limit)
 867                return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
 868
 869        /* run in dma mode if conditions are right
 870         * Note that unlike poll or interrupt mode DMA mode does not have
 871         * this 1 idle clock cycle pattern but runs the spi clock without gaps
 872         */
 873        if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
 874                return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
 875
 876        /* run in interrupt-mode */
 877        return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
 878}
 879
 880static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
 881                                       struct spi_message *msg)
 882{
 883        struct spi_device *spi = msg->spi;
 884        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 885        u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 886        int ret;
 887
 888        if (ctlr->can_dma) {
 889                /*
  890                 * DMA transfers are limited to 16 bits (0 to 65535 bytes) by
  891                 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
  892                 * aligned, hence the 65532 limit) if the maximum is exceeded.
 893                 */
 894                ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
 895                                                  GFP_KERNEL | GFP_DMA);
 896                if (ret)
 897                        return ret;
 898        }
 899
 900        cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
 901
 902        if (spi->mode & SPI_CPOL)
 903                cs |= BCM2835_SPI_CS_CPOL;
 904        if (spi->mode & SPI_CPHA)
 905                cs |= BCM2835_SPI_CS_CPHA;
 906
 907        bcm2835_wr(bs, BCM2835_SPI_CS, cs);
 908
 909        return 0;
 910}
 911
 912static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
 913                                   struct spi_message *msg)
 914{
 915        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 916
 917        /* if an error occurred and we have an active dma, then terminate */
 918        if (cmpxchg(&bs->dma_pending, true, false)) {
 919                dmaengine_terminate_sync(ctlr->dma_tx);
 920                dmaengine_terminate_sync(ctlr->dma_rx);
 921                bcm2835_spi_undo_prologue(bs);
 922        }
 923        /* and reset */
 924        bcm2835_spi_reset_hw(ctlr);
 925}
 926
 927static int chip_match_name(struct gpio_chip *chip, void *data)
 928{
 929        return !strcmp(chip->label, data);
 930}
 931
 932static int bcm2835_spi_setup(struct spi_device *spi)
 933{
 934        int err;
 935        struct gpio_chip *chip;
 936        /*
 937         * sanity checking the native-chipselects
 938         */
 939        if (spi->mode & SPI_NO_CS)
 940                return 0;
 941        if (gpio_is_valid(spi->cs_gpio))
 942                return 0;
 943        if (spi->chip_select > 1) {
 944                /* error in the case of native CS requested with CS > 1
 945                 * officially there is a CS2, but it is not documented
 946                 * which GPIO is connected with that...
 947                 */
 948                dev_err(&spi->dev,
 949                        "setup: only two native chip-selects are supported\n");
 950                return -EINVAL;
 951        }
 952        /* now translate native cs to GPIO */
 953
 954        /* get the gpio chip for the base */
 955        chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
 956        if (!chip)
 957                return 0;
 958
 959        /* and calculate the real CS */
 960        spi->cs_gpio = chip->base + 8 - spi->chip_select;
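             /* with the stock pinmux this maps native CS0 to GPIO 8 and
              * CS1 to GPIO 7 (chip->base is typically 0 on BCM2835)
              */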
 961
 962        /* and set up the "mode" and level */
 963        dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
 964                 spi->chip_select, spi->cs_gpio);
 965
 966        /* set up GPIO as output and pull to the correct level */
 967        err = gpio_direction_output(spi->cs_gpio,
 968                                    (spi->mode & SPI_CS_HIGH) ? 0 : 1);
 969        if (err) {
 970                dev_err(&spi->dev,
 971                        "could not set CS%i gpio %i as output: %i",
 972                        spi->chip_select, spi->cs_gpio, err);
 973                return err;
 974        }
 975
 976        return 0;
 977}
 978
 979static int bcm2835_spi_probe(struct platform_device *pdev)
 980{
 981        struct spi_controller *ctlr;
 982        struct bcm2835_spi *bs;
 983        struct resource *res;
 984        int err;
 985
 986        ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
 987        if (!ctlr)
 988                return -ENOMEM;
 989
 990        platform_set_drvdata(pdev, ctlr);
 991
 992        ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
 993        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
 994        ctlr->num_chipselect = 3;
 995        ctlr->setup = bcm2835_spi_setup;
 996        ctlr->transfer_one = bcm2835_spi_transfer_one;
 997        ctlr->handle_err = bcm2835_spi_handle_err;
 998        ctlr->prepare_message = bcm2835_spi_prepare_message;
 999        ctlr->dev.of_node = pdev->dev.of_node;
1000
1001        bs = spi_controller_get_devdata(ctlr);
1002
1003        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1004        bs->regs = devm_ioremap_resource(&pdev->dev, res);
1005        if (IS_ERR(bs->regs)) {
1006                err = PTR_ERR(bs->regs);
1007                goto out_controller_put;
1008        }
1009
1010        bs->clk = devm_clk_get(&pdev->dev, NULL);
1011        if (IS_ERR(bs->clk)) {
1012                err = PTR_ERR(bs->clk);
1013                dev_err(&pdev->dev, "could not get clk: %d\n", err);
1014                goto out_controller_put;
1015        }
1016
1017        bs->irq = platform_get_irq(pdev, 0);
1018        if (bs->irq <= 0) {
1019                dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
1020                err = bs->irq ? bs->irq : -ENODEV;
1021                goto out_controller_put;
1022        }
1023
1024        clk_prepare_enable(bs->clk);
1025
1026        bcm2835_dma_init(ctlr, &pdev->dev);
1027
1028        /* initialise the hardware with the default polarities */
1029        bcm2835_wr(bs, BCM2835_SPI_CS,
1030                   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
1031
1032        err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
1033                               dev_name(&pdev->dev), ctlr);
1034        if (err) {
1035                dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
1036                goto out_clk_disable;
1037        }
1038
1039        err = devm_spi_register_controller(&pdev->dev, ctlr);
1040        if (err) {
1041                dev_err(&pdev->dev, "could not register SPI controller: %d\n",
1042                        err);
1043                goto out_clk_disable;
1044        }
1045
1046        bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
1047
1048        return 0;
1049
1050out_clk_disable:
1051        clk_disable_unprepare(bs->clk);
1052out_controller_put:
1053        spi_controller_put(ctlr);
1054        return err;
1055}
1056
1057static int bcm2835_spi_remove(struct platform_device *pdev)
1058{
1059        struct spi_controller *ctlr = platform_get_drvdata(pdev);
1060        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1061
1062        bcm2835_debugfs_remove(bs);
1063
1064        /* Clear FIFOs, and disable the HW block */
1065        bcm2835_wr(bs, BCM2835_SPI_CS,
1066                   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
1067
1068        clk_disable_unprepare(bs->clk);
1069
1070        bcm2835_dma_release(ctlr);
1071
1072        return 0;
1073}
1074
1075static const struct of_device_id bcm2835_spi_match[] = {
1076        { .compatible = "brcm,bcm2835-spi", },
1077        {}
1078};
1079MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
1080
1081static struct platform_driver bcm2835_spi_driver = {
1082        .driver         = {
1083                .name           = DRV_NAME,
1084                .of_match_table = bcm2835_spi_match,
1085        },
1086        .probe          = bcm2835_spi_probe,
1087        .remove         = bcm2835_spi_remove,
1088};
1089module_platform_driver(bcm2835_spi_driver);
1090
1091MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
1092MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
1093MODULE_LICENSE("GPL");
1094