linux/drivers/i2c/busses/i2c-at91-master.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 *  Copyright (C) 2011 Weinmann Medical GmbH
 *  Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 *  Evolved from original work by:
 *  Copyright (C) 2004 Rick Bronson
 *  Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 *  Borrowed heavily from original work by:
 *  Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"

void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
        struct at91_twi_pdata *pdata = dev->pdata;
        u32 filtr = 0;

        /* FIFO should be enabled immediately after the software reset */
        if (dev->fifo_size)
                at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
        at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

        /* enable digital filter */
        if (pdata->has_dig_filtr && dev->enable_dig_filt)
                filtr |= AT91_TWI_FILTR_FILT;

        /* enable advanced digital filter */
        if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
                filtr |= AT91_TWI_FILTR_FILT |
                         (AT91_TWI_FILTR_THRES(dev->filter_width) &
                         AT91_TWI_FILTR_THRES_MASK);

        /* enable analog filter */
        if (pdata->has_ana_filtr && dev->enable_ana_filt)
                filtr |= AT91_TWI_FILTR_PADFEN;

        if (filtr)
                at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
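/*
 * Worked example with illustrative values: for a 66 MHz peripheral clock,
 * a 100 kHz bus and a platform clk_offset of 4, the code below computes
 *   div   = DIV_ROUND_UP(66000000, 2 * 100000) - 4 = 326
 *   ckdiv = fls(326 >> 8) = 1, cdiv = 326 >> 1 = 163
 * giving twi_clk = 66 MHz / (2 * (163 * (1 << 1) + 4)) = 100 kHz.
 */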
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
        int ckdiv, cdiv, div, hold = 0, filter_width = 0;
        struct at91_twi_pdata *pdata = dev->pdata;
        int offset = pdata->clk_offset;
        int max_ckdiv = pdata->clk_max_div;
        struct i2c_timings timings, *t = &timings;

        i2c_parse_fw_timings(dev->dev, t, true);

        div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
                                       2 * t->bus_freq_hz) - offset);
        ckdiv = fls(div >> 8);
        cdiv = div >> ckdiv;

        if (ckdiv > max_ckdiv) {
                dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
                         ckdiv, max_ckdiv);
                ckdiv = max_ckdiv;
                cdiv = 255;
        }

        if (pdata->has_hold_field) {
                /*
                 * hold time = HOLD + 3 x T_peripheral_clock
                 * Use clk rate in kHz to prevent overflows when computing
                 * hold.
                 */
                hold = DIV_ROUND_UP(t->sda_hold_ns
                                    * (clk_get_rate(dev->clk) / 1000), 1000000);
                hold -= 3;
                if (hold < 0)
                        hold = 0;
                if (hold > AT91_TWI_CWGR_HOLD_MAX) {
                        dev_warn(dev->dev,
                                 "HOLD field set to its maximum value (%d instead of %d)\n",
                                 AT91_TWI_CWGR_HOLD_MAX, hold);
                        hold = AT91_TWI_CWGR_HOLD_MAX;
                }
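                /*
                 * Illustrative result: with a 66 MHz peripheral clock and
                 * t->sda_hold_ns = 300, the computation above gives
                 * hold = DIV_ROUND_UP(300 * 66000, 1000000) - 3 = 17 extra
                 * peripheral clock periods of SDA hold time.
                 */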
        }

        if (pdata->has_adv_dig_filtr) {
                /*
                 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
                 * peripheral clocks
                 */
                filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
                                * (clk_get_rate(dev->clk) / 1000), 1000000);
                if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
                        dev_warn(dev->dev,
                                "Filter threshold set to its maximum value (%d instead of %d)\n",
                                AT91_TWI_FILTR_THRES_MAX, filter_width);
                        filter_width = AT91_TWI_FILTR_THRES_MAX;
                }
        }

        dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
                            | AT91_TWI_CWGR_HOLD(hold);

        dev->filter_width = filter_width;

        dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
                cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
                t->digital_filter_width_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
        struct at91_twi_dma *dma = &dev->dma;

        at91_twi_irq_save(dev);

        if (dma->xfer_in_progress) {
                if (dma->direction == DMA_FROM_DEVICE)
                        dmaengine_terminate_sync(dma->chan_rx);
                else
                        dmaengine_terminate_sync(dma->chan_tx);
                dma->xfer_in_progress = false;
        }
        if (dma->buf_mapped) {
                dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
                                 dev->buf_len, dma->direction);
                dma->buf_mapped = false;
        }

        at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
        if (!dev->buf_len)
                return;

        /* 8bit write works with and without FIFO */
        writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

        /* send stop when last byte has been written */
        if (--dev->buf_len == 0) {
                if (!dev->use_alt_cmd)
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
                at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
        }

        dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

        ++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
        struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

        dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
                         dev->buf_len, DMA_TO_DEVICE);

 183         * When this callback is called, THR/TX FIFO is likely not to be empty
 184         * yet. So we have to wait for TXCOMP or NACK bits to be set into the
 185         * Status Register to be sure that the STOP bit has been sent and the
 186         * transfer is completed. The NACK interrupt has already been enabled,
 187         * we just have to enable TXCOMP one.
 188         */
 189        at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
        at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
        if (!dev->use_alt_cmd)
                at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
        dma_addr_t dma_addr;
        struct dma_async_tx_descriptor *txdesc;
        struct at91_twi_dma *dma = &dev->dma;
        struct dma_chan *chan_tx = dma->chan_tx;
        unsigned int sg_len = 1;

        if (!dev->buf_len)
                return;

        dma->direction = DMA_TO_DEVICE;

        at91_twi_irq_save(dev);
        dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev->dev, dma_addr)) {
                dev_err(dev->dev, "dma map failed\n");
                return;
        }
        dma->buf_mapped = true;
        at91_twi_irq_restore(dev);

        if (dev->fifo_size) {
                size_t part1_len, part2_len;
                struct scatterlist *sg;
                unsigned fifo_mr;

                sg_len = 0;

                part1_len = dev->buf_len & ~0x3;
                if (part1_len) {
                        sg = &dma->sg[sg_len++];
                        sg_dma_len(sg) = part1_len;
                        sg_dma_address(sg) = dma_addr;
                }

                part2_len = dev->buf_len & 0x3;
                if (part2_len) {
                        sg = &dma->sg[sg_len++];
                        sg_dma_len(sg) = part2_len;
                        sg_dma_address(sg) = dma_addr + part1_len;
                }
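                /*
                 * Illustrative split: a 13-byte buffer yields
                 * part1_len = 12 (word-aligned DMA accesses) and
                 * part2_len = 1 (single-byte accesses), i.e. two
                 * scatter-gather entries.
                 */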

                /*
                 * DMA controller is triggered when at least 4 data can be
                 * written into the TX FIFO
                 */
                fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
                fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
                fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
                at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
        } else {
                sg_dma_len(&dma->sg[0]) = dev->buf_len;
                sg_dma_address(&dma->sg[0]) = dma_addr;
        }

        txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
                                         DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc) {
                dev_err(dev->dev, "dma prep slave sg failed\n");
                goto error;
        }

        txdesc->callback = at91_twi_write_data_dma_callback;
        txdesc->callback_param = dev;

        dma->xfer_in_progress = true;
        dmaengine_submit(txdesc);
        dma_async_issue_pending(chan_tx);

        return;

error:
        at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
        /*
         * If we get here, it means there is garbage data in the RHR, so
         * discard it.
         */
        if (!dev->buf_len) {
                at91_twi_read(dev, AT91_TWI_RHR);
                return;
        }

        /* 8bit read works with and without FIFO */
        *dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
        --dev->buf_len;

        /* return if aborting, we only needed to read RHR to clear RXRDY */
        if (dev->recv_len_abort)
                return;

        /* handle I2C_SMBUS_BLOCK_DATA */
        if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
                /* ensure length byte is a valid value */
                if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
                        dev->msg->flags &= ~I2C_M_RECV_LEN;
                        dev->buf_len += *dev->buf;
                        dev->msg->len = dev->buf_len + 1;
                        dev_dbg(dev->dev, "received block length %zu\n",
                                         dev->buf_len);
                } else {
                        /* abort and send the stop by reading one more byte */
                        dev->recv_len_abort = true;
                        dev->buf_len = 1;
                }
        }
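        /*
         * Illustrative run: for a slave replying with length byte 4,
         * buf_len (0 after the length byte was consumed) becomes 4 and
         * msg->len becomes 5 (the length byte plus four data bytes).
         */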

        /* send stop once the second-to-last byte has been read */
        if (!dev->use_alt_cmd && dev->buf_len == 1)
                at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

        dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

        ++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
        struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
        unsigned ier = AT91_TWI_TXCOMP;

        dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
                         dev->buf_len, DMA_FROM_DEVICE);

        if (!dev->use_alt_cmd) {
                /* The last two bytes have to be read without using dma */
                dev->buf += dev->buf_len - 2;
                dev->buf_len = 2;
                ier |= AT91_TWI_RXRDY;
        }
        at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
        dma_addr_t dma_addr;
        struct dma_async_tx_descriptor *rxdesc;
        struct at91_twi_dma *dma = &dev->dma;
        struct dma_chan *chan_rx = dma->chan_rx;
        size_t buf_len;

        buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
        dma->direction = DMA_FROM_DEVICE;

        /* Keep in mind that we won't use dma to read the last two bytes */
        at91_twi_irq_save(dev);
        dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev->dev, dma_addr)) {
                dev_err(dev->dev, "dma map failed\n");
                return;
        }
        dma->buf_mapped = true;
        at91_twi_irq_restore(dev);

        if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
                unsigned fifo_mr;

                /*
                 * DMA controller is triggered when at least 4 data can be
                 * read from the RX FIFO
                 */
                fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
                fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
                fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
                at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
        }

        sg_dma_len(&dma->sg[0]) = buf_len;
        sg_dma_address(&dma->sg[0]) = dma_addr;

        rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc) {
                dev_err(dev->dev, "dma prep slave sg failed\n");
                goto error;
        }

        rxdesc->callback = at91_twi_read_data_dma_callback;
        rxdesc->callback_param = dev;

        dma->xfer_in_progress = true;
        dmaengine_submit(rxdesc);
        dma_async_issue_pending(dma->chan_rx);

        return;

error:
        at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
        struct at91_twi_dev *dev = dev_id;
        const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
        const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

        if (!irqstatus)
                return IRQ_NONE;
        /*
         * In reception, the behavior of the twi device (before sama5d2) is
         * weird: there is some magic about the RXRDY flag. When a byte has
         * almost been received, the reception of a new one is anticipated if
         * there is no stop command to send. That is why we ask to send the
         * stop command not on the last byte but on the second-to-last one.
         *
         * Unfortunately, we could still have the RXRDY flag set even if the
         * transfer is done and we have read the last byte. It might happen
         * when the i2c slave device sends data too quickly after receiving
         * the ack from the master. The data has almost been received before
         * the order to send stop arrives. In this case, sending the stop
         * command could cause an RXRDY interrupt along with the TXCOMP one.
         * It is better to handle the RXRDY interrupt first in order not to
         * keep garbage data in the Receive Holding Register for the next
         * transfer.
         */
        if (irqstatus & AT91_TWI_RXRDY) {
                /*
                 * Read all available bytes at once by polling RXRDY, which is
                 * usable with and without the FIFO. With the FIFO enabled we
                 * could also read RXFL and avoid polling RXRDY.
                 */
                do {
                        at91_twi_read_next_byte(dev);
                } while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
        }

        /*
         * When a NACK condition is detected, the I2C controller sets the NACK,
         * TXCOMP and TXRDY bits all together in the Status Register (SR).
         *
         * 1 - Handling NACK errors with CPU write transfer.
         *
         * In such a case, we should not write the next byte into the Transmit
         * Holding Register (THR); otherwise the I2C controller would start a
         * new transfer and the I2C slave is likely to reply with another NACK.
         *
         * 2 - Handling NACK errors with DMA write transfer.
         *
         * By setting the TXRDY bit in the SR, the I2C controller also triggers
         * the DMA controller to write the next data into the THR. Then the
         * result depends on the hardware version of the I2C controller.
         *
         * 2a - Without support of the Alternative Command mode.
         *
         * This is the worst case: the DMA controller is triggered to write the
         * next data into the THR, hence starting a new transfer: the I2C slave
         * is likely to reply with another NACK.
         * Concurrently, this interrupt handler is likely to be called to manage
         * the first NACK before the I2C controller detects the second NACK and
         * sets the NACK bit in the SR once again.
         * When handling the first NACK, this interrupt handler disables the I2C
         * controller interrupts, especially the NACK interrupt.
         * Hence, the NACK bit is left pending in the SR. This is why we should
         * read the SR to clear all pending interrupts at the beginning of
         * at91_do_twi_transfer() before actually starting a new transfer.
         *
         * 2b - With support of the Alternative Command mode.
         *
         * When a NACK condition is detected, the I2C controller also locks the
         * THR (and sets the LOCK bit in the SR): even though the DMA controller
         * is triggered by the TXRDY bit to write the next data into the THR,
         * this data actually won't go on the I2C bus, hence a second NACK is
         * not generated.
         */
        if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
                at91_disable_twi_interrupts(dev);
                complete(&dev->cmd_complete);
        } else if (irqstatus & AT91_TWI_TXRDY) {
                at91_twi_write_next_byte(dev);
        }

        /* catch error flags */
        dev->transfer_status |= status;

        return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
        int ret;
        unsigned long time_left;
        bool has_unre_flag = dev->pdata->has_unre_flag;
        bool has_alt_cmd = dev->pdata->has_alt_cmd;

        /*
         * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
         * read flag but shows the state of the transmission at the time the
         * Status Register is read. According to the programmer datasheet,
         * TXCOMP is set when both holding register and internal shifter are
         * empty and STOP condition has been sent.
         * Consequently, we should enable NACK interrupt rather than TXCOMP to
         * detect transmission failure.
         * Indeed, let's take the case of an i2c write command using DMA.
         * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
         * TXCOMP bits are set together in the Status Register.
         * LOCK is a clear on write bit, which is set to prevent the DMA
         * controller from sending new data on the i2c bus after a NACK
         * condition has happened. Once locked, this i2c peripheral stops
         * triggering the DMA controller for new data but it is more than
         * likely that a new DMA transaction is already in progress, writing
         * into the Transmit Holding Register. Since the peripheral is locked,
         * these new data won't be sent on the i2c bus but will remain in the
         * Transmit Holding Register, so the TXCOMP bit is cleared.
         * Then when the interrupt handler is called, the Status Register is
         * read: the TXCOMP bit is clear but the NACK bit is still set. The
         * driver handles the error properly, without waiting for a timeout.
         * This case can be reproduced easily when writing into an at24 eeprom.
         *
         * Besides, the TXCOMP bit is already set before the i2c transaction
         * has been started. For read transactions, this bit is cleared when
         * writing the START bit into the Control Register. So the
         * corresponding interrupt can safely be enabled just after.
         * However for write transactions managed by the CPU, we first write
         * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
         * interrupt. If TXCOMP interrupt were enabled before writing into THR,
         * the interrupt handler would be called immediately and the i2c command
         * would be reported as completed.
         * Also when a write transaction is managed by the DMA controller,
         * enabling the TXCOMP interrupt in this function may lead to a race
         * condition since we don't know whether the TXCOMP interrupt is enabled
         * before or after the DMA has started to write into THR. So the TXCOMP
         * interrupt is enabled later by at91_twi_write_data_dma_callback().
         * Immediately after, in that DMA callback, if the alternative command
         * mode is not used, we still need to send the STOP condition manually
         * by writing the corresponding bit into the Control Register.
         */

        dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
                (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

        reinit_completion(&dev->cmd_complete);
        dev->transfer_status = 0;

        /* Clear pending interrupts, such as NACK. */
        at91_twi_read(dev, AT91_TWI_SR);

        if (dev->fifo_size) {
                unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

                /* Reset FIFO mode register */
                fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
                             AT91_TWI_FMR_RXRDYM_MASK);
                fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
                fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
                at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

                /* Flush FIFOs */
                at91_twi_write(dev, AT91_TWI_CR,
                               AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
        }

        if (!dev->buf_len) {
                at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
                at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
        } else if (dev->msg->flags & I2C_M_RD) {
                unsigned start_flags = AT91_TWI_START;

                /* if only one byte is to be read, immediately stop transfer */
                if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
                    !(dev->msg->flags & I2C_M_RECV_LEN))
                        start_flags |= AT91_TWI_STOP;
                at91_twi_write(dev, AT91_TWI_CR, start_flags);
                /*
                 * When using dma without alternative command mode, the last
                 * byte has to be read manually in order not to send the stop
                 * command too late and then receive extra data.
                 * In practice, there are some issues if you use the dma to
                 * read n-1 bytes because of latency.
                 * Reading n-2 bytes with dma and the last two manually
                 * seems to be the best solution.
                 */
                if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
                        at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
                        at91_twi_read_data_dma(dev);
                } else {
                        at91_twi_write(dev, AT91_TWI_IER,
                                       AT91_TWI_TXCOMP |
                                       AT91_TWI_NACK |
                                       AT91_TWI_RXRDY);
                }
        } else {
                if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
                        at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
                        at91_twi_write_data_dma(dev);
                } else {
                        at91_twi_write_next_byte(dev);
                        at91_twi_write(dev, AT91_TWI_IER,
                                       AT91_TWI_TXCOMP | AT91_TWI_NACK |
                                       (dev->buf_len ? AT91_TWI_TXRDY : 0));
                }
        }

        time_left = wait_for_completion_timeout(&dev->cmd_complete,
                                              dev->adapter.timeout);
        if (time_left == 0) {
                dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
                dev_err(dev->dev, "controller timed out\n");
                at91_init_twi_bus(dev);
                ret = -ETIMEDOUT;
                goto error;
        }
        if (dev->transfer_status & AT91_TWI_NACK) {
                dev_dbg(dev->dev, "received nack\n");
                ret = -EREMOTEIO;
                goto error;
        }
        if (dev->transfer_status & AT91_TWI_OVRE) {
                dev_err(dev->dev, "overrun while reading\n");
                ret = -EIO;
                goto error;
        }
        if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
                dev_err(dev->dev, "underrun while writing\n");
                ret = -EIO;
                goto error;
        }
        if ((has_alt_cmd || dev->fifo_size) &&
            (dev->transfer_status & AT91_TWI_LOCK)) {
                dev_err(dev->dev, "tx locked\n");
                ret = -EIO;
                goto error;
        }
        if (dev->recv_len_abort) {
                dev_err(dev->dev, "invalid smbus block length recvd\n");
                ret = -EPROTO;
                goto error;
        }

        dev_dbg(dev->dev, "transfer complete\n");

        return 0;

error:
        /* first stop DMA transfer if still in progress */
        at91_twi_dma_cleanup(dev);
        /* then flush THR/FIFO and unlock TX if locked */
        if ((has_alt_cmd || dev->fifo_size) &&
            (dev->transfer_status & AT91_TWI_LOCK)) {
                dev_dbg(dev->dev, "unlock tx\n");
                at91_twi_write(dev, AT91_TWI_CR,
                               AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
        }

        /*
         * some faulty I2C slave devices might hold SDA down;
         * we can send a bus clear command, hoping that the pins will be
         * released
         */
        i2c_recover_bus(&dev->adapter);

        return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
        struct at91_twi_dev *dev = i2c_get_adapdata(adap);
        int ret;
        unsigned int_addr_flag = 0;
        struct i2c_msg *m_start = msg;
        bool is_read;

        dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

        ret = pm_runtime_get_sync(dev->dev);
        if (ret < 0)
                goto out;

        if (num == 2) {
                int internal_address = 0;
                int i;

                /* 1st msg is put into the internal address, start with 2nd */
                m_start = &msg[1];
                for (i = 0; i < msg->len; ++i) {
                        const unsigned addr = msg->buf[msg->len - 1 - i];

                        internal_address |= addr << (8 * i);
                        int_addr_flag += AT91_TWI_IADRSZ_1;
                }
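                /*
                 * Illustrative packing: a two-byte first message
                 * { 0x12, 0x34 } yields internal_address = 0x1234, with
                 * int_addr_flag selecting a 2-byte internal address size.
                 */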
                at91_twi_write(dev, AT91_TWI_IADR, internal_address);
        }

        dev->use_alt_cmd = false;
        is_read = (m_start->flags & I2C_M_RD);
        if (dev->pdata->has_alt_cmd) {
                if (m_start->len > 0 &&
                    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
                        at91_twi_write(dev, AT91_TWI_ACR,
                                       AT91_TWI_ACR_DATAL(m_start->len) |
                                       ((is_read) ? AT91_TWI_ACR_DIR : 0));
                        dev->use_alt_cmd = true;
                } else {
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
                }
        }

        at91_twi_write(dev, AT91_TWI_MMR,
                       (m_start->addr << 16) |
                       int_addr_flag |
                       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

        dev->buf_len = m_start->len;
        dev->buf = m_start->buf;
        dev->msg = m_start;
        dev->recv_len_abort = false;

        ret = at91_do_twi_transfer(dev);

        ret = (ret < 0) ? ret : num;
out:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
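 * The internal address register holds at most three bytes, which is why
 * max_comb_1st_msg_len below is 3.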
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
        .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
        .max_comb_1st_msg_len = 3,
};

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
                | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
        .master_xfer    = at91_twi_xfer,
        .functionality  = at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
        int ret = 0;
        struct dma_slave_config slave_config;
        struct at91_twi_dma *dma = &dev->dma;
        enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

        /*
         * The actual width of the access will be chosen in
         * dmaengine_prep_slave_sg():
         * for each buffer in the scatter-gather list, if its size is aligned
         * to addr_width then addr_width accesses will be performed to transfer
         * the buffer. On the other hand, if the buffer size is not aligned to
         * addr_width then the buffer is transferred using single byte accesses.
         * Please refer to the Atmel eXtended DMA controller driver.
         * When FIFOs are used, the TXRDYM threshold can always be set to
         * trigger the XDMAC when at least 4 data can be written into the TX
         * FIFO, even if single byte accesses are performed.
         * However the RXRDYM threshold must be set to fit the access width,
         * deduced from buffer length, so the XDMAC is triggered properly to
         * read data from the RX FIFO.
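         * For instance, following the rule above, a 12-byte scatter-gather
         * entry would be transferred with 4-byte accesses, while a 13-byte
         * entry would fall back to single-byte accesses.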
         */
        if (dev->fifo_size)
                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

        memset(&slave_config, 0, sizeof(slave_config));
        slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
        slave_config.src_addr_width = addr_width;
        slave_config.src_maxburst = 1;
        slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
        slave_config.dst_addr_width = addr_width;
        slave_config.dst_maxburst = 1;
        slave_config.device_fc = false;

        dma->chan_tx = dma_request_chan(dev->dev, "tx");
        if (IS_ERR(dma->chan_tx)) {
                ret = PTR_ERR(dma->chan_tx);
                dma->chan_tx = NULL;
                goto error;
        }

        dma->chan_rx = dma_request_chan(dev->dev, "rx");
        if (IS_ERR(dma->chan_rx)) {
                ret = PTR_ERR(dma->chan_rx);
                dma->chan_rx = NULL;
                goto error;
        }

        slave_config.direction = DMA_MEM_TO_DEV;
        if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
                dev_err(dev->dev, "failed to configure tx channel\n");
                ret = -EINVAL;
                goto error;
        }

        slave_config.direction = DMA_DEV_TO_MEM;
        if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
                dev_err(dev->dev, "failed to configure rx channel\n");
                ret = -EINVAL;
                goto error;
        }

        sg_init_table(dma->sg, 2);
        dma->buf_mapped = false;
        dma->xfer_in_progress = false;
        dev->use_dma = true;

        dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
                 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

        return ret;

error:
        if (ret != -EPROBE_DEFER)
                dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
        if (dma->chan_rx)
                dma_release_channel(dma->chan_rx);
        if (dma->chan_tx)
                dma_release_channel(dma->chan_tx);
        return ret;
}

static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
                                       struct at91_twi_dev *dev)
{
        struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

        rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
        if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
                dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
                return PTR_ERR(rinfo->pinctrl);
        }
        dev->adapter.bus_recovery_info = rinfo;

        return 0;
}

static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap)
{
        struct at91_twi_dev *dev = i2c_get_adapdata(adap);

        dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
        if (!(dev->transfer_status & AT91_TWI_SDA)) {
                dev_dbg(dev->dev, "SDA is down; sending bus clear command\n");
                if (dev->use_alt_cmd) {
                        unsigned int acr;

                        acr = at91_twi_read(dev, AT91_TWI_ACR);
                        acr &= ~AT91_TWI_ACR_DATAL_MASK;
                        at91_twi_write(dev, AT91_TWI_ACR, acr);
                }
                at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR);
        }

        return 0;
}

static int at91_init_twi_recovery_info(struct platform_device *pdev,
                                       struct at91_twi_dev *dev)
{
        struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
        bool has_clear_cmd = dev->pdata->has_clear_cmd;

        if (!has_clear_cmd)
                return at91_init_twi_recovery_gpio(pdev, dev);

        rinfo->recover_bus = at91_twi_recover_bus_cmd;
        dev->adapter.bus_recovery_info = rinfo;

        return 0;
}

int at91_twi_probe_master(struct platform_device *pdev,
                          u32 phy_addr, struct at91_twi_dev *dev)
{
        int rc;

        init_completion(&dev->cmd_complete);

        rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
                              dev_name(dev->dev), dev);
        if (rc) {
                dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
                return rc;
        }

        if (dev->dev->of_node) {
                rc = at91_twi_configure_dma(dev, phy_addr);
                if (rc == -EPROBE_DEFER)
                        return rc;
        }

        if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
                                  &dev->fifo_size)) {
                dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
        }

        dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
                                                     "i2c-digital-filter");

        dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
                                                     "i2c-analog-filter");
        at91_calc_twi_clock(dev);

        rc = at91_init_twi_recovery_info(pdev, dev);
        if (rc == -EPROBE_DEFER)
                return rc;

        dev->adapter.algo = &at91_twi_algorithm;
        dev->adapter.quirks = &at91_twi_quirks;

        return 0;
}