linux/drivers/dma/qcom/hidma_ll.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE                 16      /* each EVRE is 16 bytes */

#define HIDMA_TRCA_CTRLSTS_REG          0x000
#define HIDMA_TRCA_RING_LOW_REG         0x008
#define HIDMA_TRCA_RING_HIGH_REG        0x00C
#define HIDMA_TRCA_RING_LEN_REG         0x010
#define HIDMA_TRCA_DOORBELL_REG         0x400

#define HIDMA_EVCA_CTRLSTS_REG          0x000
#define HIDMA_EVCA_INTCTRL_REG          0x004
#define HIDMA_EVCA_RING_LOW_REG         0x008
#define HIDMA_EVCA_RING_HIGH_REG        0x00C
#define HIDMA_EVCA_RING_LEN_REG         0x010
#define HIDMA_EVCA_WRITE_PTR_REG        0x020
#define HIDMA_EVCA_DOORBELL_REG         0x400

#define HIDMA_EVCA_IRQ_STAT_REG         0x100
#define HIDMA_EVCA_IRQ_CLR_REG          0x108
#define HIDMA_EVCA_IRQ_EN_REG           0x110

#define HIDMA_EVRE_CFG_IDX              0

#define HIDMA_EVRE_ERRINFO_BIT_POS      24
#define HIDMA_EVRE_CODE_BIT_POS         28

#define HIDMA_EVRE_ERRINFO_MASK         GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK            GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK           GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK             GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS          0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS         0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS         1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS   9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS      10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS      11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS     14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)       | \
                     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)       | \
                     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
                     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)    | \
                     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)    | \
                     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)         \
do {                                                            \
        iter += size;                                           \
        if (iter >= ring_size)                                  \
                iter -= ring_size;                              \
} while (0)
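
/*
 * Example (illustrative arithmetic only): with size = HIDMA_EVRE_SIZE (16)
 * and a four-entry ring (ring_size = 64), an iterator at offset 48 advances
 * to 64 and wraps back to 0, so offsets always stay within [0, ring_size).
 */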

#define HIDMA_CH_STATE(val)     \
        ((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK                              \
        (BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)   |   \
         BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) |   \
         BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)       |   \
         BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)    |   \
         BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {
        HIDMA_CH_DISABLE = 0,
        HIDMA_CH_ENABLE = 1,
        HIDMA_CH_SUSPEND = 2,
        HIDMA_CH_RESET = 9,
};

enum ch_state {
        HIDMA_CH_DISABLED = 0,
        HIDMA_CH_ENABLED = 1,
        HIDMA_CH_RUNNING = 2,
        HIDMA_CH_SUSPENDED = 3,
        HIDMA_CH_STOPPED = 4,
};

enum err_code {
        HIDMA_EVRE_STATUS_COMPLETE = 1,
        HIDMA_EVRE_STATUS_ERROR = 4,
};

static int hidma_is_chan_enabled(int state)
{
        switch (state) {
        case HIDMA_CH_ENABLED:
        case HIDMA_CH_RUNNING:
                return true;
        default:
                return false;
        }
}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
        struct hidma_tre *tre;

        if (tre_ch >= lldev->nr_tres) {
                dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
                return;
        }

        tre = &lldev->trepool[tre_ch];
        if (atomic_read(&tre->allocated) != true) {
                dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
                return;
        }

        atomic_set(&tre->allocated, 0);
}

int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
                     void (*callback)(void *data), void *data, u32 *tre_ch)
{
        unsigned int i;
        struct hidma_tre *tre;
        u32 *tre_local;

        if (!tre_ch || !lldev)
                return -EINVAL;

        /* need to have at least one empty spot in the queue */
        for (i = 0; i < lldev->nr_tres - 1; i++) {
                if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
                        break;
        }

        if (i == (lldev->nr_tres - 1))
                return -ENOMEM;

        tre = &lldev->trepool[i];
        tre->dma_sig = sig;
        tre->dev_name = dev_name;
        tre->callback = callback;
        tre->data = data;
        tre->idx = i;
        tre->status = 0;
        tre->queued = 0;
        tre->err_code = 0;
        tre->err_info = 0;
        tre->lldev = lldev;
        tre_local = &tre->tre_local[0];
        tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
        tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);        /* set IEOB */
        *tre_ch = i;
        if (callback)
                callback(data);
        return 0;
}
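
/*
 * Typical caller flow, as an illustrative sketch only (the real caller is
 * the dmaengine glue in hidma.c; "txntype", "sig" and "done_cb" stand in
 * for whatever values that caller uses):
 *
 *      u32 tre_ch;
 *
 *      if (!hidma_ll_request(lldev, sig, "hidma", done_cb, ctx, &tre_ch)) {
 *              hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len,
 *                                           0, txntype);
 *              hidma_ll_queue_request(lldev, tre_ch);
 *              hidma_ll_start(lldev);          (rings the doorbell)
 *      }
 */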

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
        struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
        struct hidma_tre *tre;

        while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
                /* call the user if it has been read by the hardware */
                if (tre->callback)
                        tre->callback(tre->data);
        }
}

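/*
 * Retire the oldest pending TRE: unlink it from the pending list, advance
 * the processed offset, record the error info/code reported by the EVRE,
 * and hand the TRE to the completion tasklet so the user callback runs
 * outside interrupt context.
 */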
static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
                                u8 err_code)
{
        struct hidma_tre *tre;
        unsigned long flags;
        u32 tre_iterator;

        spin_lock_irqsave(&lldev->lock, flags);

        tre_iterator = lldev->tre_processed_off;
        tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
        if (!tre) {
                spin_unlock_irqrestore(&lldev->lock, flags);
                dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
                         tre_iterator / HIDMA_TRE_SIZE);
                return -EINVAL;
        }
        lldev->pending_tre_list[tre->tre_index] = NULL;

        /*
         * Keep track of pending TREs that SW is expecting to receive
         * from HW. We got one now. Decrement our counter.
         */
        if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
                dev_warn(lldev->dev, "tre count mismatch on completion");
                atomic_set(&lldev->pending_tre_count, 0);
        }

        HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
                                 lldev->tre_ring_size);
        lldev->tre_processed_off = tre_iterator;
        spin_unlock_irqrestore(&lldev->lock, flags);

        tre->err_info = err_info;
        tre->err_code = err_code;
        tre->queued = 0;

        kfifo_put(&lldev->handoff_fifo, tre);
        tasklet_schedule(&lldev->task);

        return 0;
}

/*
 * Called to handle the interrupt for the channel.
 * Returns the number of EVREs consumed on this run; returns 0 if there was
 * nothing to consume or no pending TREs/EVREs were found.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
        u32 evre_ring_size = lldev->evre_ring_size;
        u32 err_info, err_code, evre_write_off;
        u32 evre_iterator;
        u32 num_completed = 0;

        evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
        evre_iterator = lldev->evre_processed_off;

        if ((evre_write_off > evre_ring_size) ||
            (evre_write_off % HIDMA_EVRE_SIZE)) {
                dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
                return 0;
        }

        /*
         * By the time control reaches here the number of EVREs and TREs
         * may not match. Only consume the ones that hardware told us.
         */
        while (evre_iterator != evre_write_off) {
                u32 *current_evre = lldev->evre_ring + evre_iterator;
                u32 cfg;

                cfg = current_evre[HIDMA_EVRE_CFG_IDX];
                err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
                err_info &= HIDMA_EVRE_ERRINFO_MASK;
                err_code =
                    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

                if (hidma_post_completed(lldev, err_info, err_code))
                        break;

                HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
                                         evre_ring_size);

                /*
                 * Read the new event descriptor written by the HW.
                 * As we are processing the delivered events, other events
                 * get queued to the SW for processing.
                 */
                evre_write_off =
                    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
                num_completed++;

                /*
                 * An error interrupt might have arrived while we are
                 * processing the completion interrupt.
                 */
                if (!hidma_ll_isenabled(lldev))
                        break;
        }

        if (num_completed) {
                u32 evre_read_off = (lldev->evre_processed_off +
                                     HIDMA_EVRE_SIZE * num_completed);
                evre_read_off = evre_read_off % evre_ring_size;
                writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

                /* record the last processed EVRE offset */
                lldev->evre_processed_off = evre_read_off;
        }

        return num_completed;
}
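
/*
 * Worked example for the doorbell arithmetic above (illustrative numbers
 * only): with a 64-byte EVRE ring, evre_processed_off = 32 and three EVREs
 * consumed, the new read offset is (32 + 3 * 16) % 64 = 16, which is both
 * written to the doorbell and recorded as the next starting point.
 */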

void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
                               u8 err_code)
{
        while (atomic_read(&lldev->pending_tre_count)) {
                if (hidma_post_completed(lldev, err_info, err_code))
                        break;
        }
}

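/*
 * Issue the RESET command to both channels and wait for each to report the
 * DISABLED state before declaring the hardware quiesced.
 */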
static int hidma_ll_reset(struct hidma_lldev *lldev)
{
        u32 val;
        int ret;

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_RESET << 16;
        writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

        /*
         * Wait for the DMA logic to quiesce after reset: poll the channel
         * state every 1ms, up to a 10ms maximum.
         */
        ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
                                 1000, 10000);
        if (ret) {
                dev_err(lldev->dev, "transfer channel did not reset\n");
                return ret;
        }

        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_RESET << 16;
        writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

        /*
         * Wait for the DMA logic to quiesce after reset: poll the channel
         * state every 1ms, up to a 10ms maximum.
         */
        ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
                                 1000, 10000);
        if (ret)
                return ret;

        lldev->trch_state = HIDMA_CH_DISABLED;
        lldev->evch_state = HIDMA_CH_DISABLED;
        return 0;
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. The EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use the last processed offset to figure out which TRE is
 * associated with which EVRE. If two TREs are consumed by HW, the EVREs
 * are in order in the event ring.
 *
 * This handler will do one pass of consuming EVREs. Other EVREs may
 * be delivered while we are working. It will try to consume incoming
 * EVREs one more time and return.
 *
 * For unprocessed EVREs, hardware will trigger another interrupt until
 * all the interrupt bits are cleared.
 *
 * Hardware guarantees that by the time an interrupt is observed, all data
 * transactions in flight are delivered to their respective places and
 * are visible to the CPU.
 *
 * On demand paging for IOMMU is only supported for PCIe via PRI
 * (Page Request Interface), not for HIDMA. All other hardware instances
 * including HIDMA work on pinned DMA addresses.
 *
 * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
 * IOMMU latency will be built into the data movement time. By the time the
 * interrupt happens, IOMMU lookups and data movement have already taken
 * place.
 *
 * While the first read in a typical PCI endpoint ISR traditionally flushes
 * all outstanding requests to the destination, that concept does not apply
 * to this hardware.
 */
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
        unsigned long irqflags;

        if (cause & HIDMA_ERR_INT_MASK) {
                dev_err(lldev->dev, "error 0x%x, disabling...\n", cause);

                /* Clear out pending interrupts */
                writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

                /* No further submissions. */
                hidma_ll_disable(lldev);

                /* The driver completes the txn and notifies the client. */
                hidma_cleanup_pending_tre(lldev, 0xFF,
                                          HIDMA_EVRE_STATUS_ERROR);

                return;
        }

        spin_lock_irqsave(&lldev->lock, irqflags);
        writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
        spin_unlock_irqrestore(&lldev->lock, irqflags);

        /*
         * Fine tuned for this HW...
         *
         * This ISR has been designed for this particular hardware. Relaxed
         * read and write accessors are used for performance reasons due to
         * interrupt delivery guarantees. Do not copy this code blindly and
         * expect that to work.
         *
         * Try to consume as many EVREs as possible.
         */
        hidma_handle_tre_completion(lldev);
}

irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
        struct hidma_lldev *lldev = arg;
        u32 status;
        u32 enable;
        u32 cause;

        status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
        cause = status & enable;

        while (cause) {
                hidma_ll_int_handler_internal(lldev, cause);

                /*
                 * Another interrupt might have arrived while we are
                 * processing this one. Read the new cause.
                 */
                status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
                enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
                cause = status & enable;
        }

        return IRQ_HANDLED;
}

irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
        struct hidma_lldev *lldev = arg;

        hidma_ll_int_handler_internal(lldev, cause);
        return IRQ_HANDLED;
}
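
/*
 * Registration sketch (illustrative only; the actual request_irq call lives
 * in the probe path in hidma.c):
 *
 *      rc = devm_request_irq(dev, chirq, hidma_ll_inthandler, 0,
 *                            "qcom-hidma", lldev);
 */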

int hidma_ll_enable(struct hidma_lldev *lldev)
{
        u32 val;
        int ret;

        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_ENABLE << 16;
        writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

        ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
                                 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
                                 1000, 10000);
        if (ret) {
                dev_err(lldev->dev, "event channel did not get enabled\n");
                return ret;
        }

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_ENABLE << 16;
        writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

        ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
                                 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
                                 1000, 10000);
        if (ret) {
                dev_err(lldev->dev, "transfer channel did not get enabled\n");
                return ret;
        }

        lldev->trch_state = HIDMA_CH_ENABLED;
        lldev->evch_state = HIDMA_CH_ENABLED;

        /* enable irqs */
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

        return 0;
}

void hidma_ll_start(struct hidma_lldev *lldev)
{
        unsigned long irqflags;

        spin_lock_irqsave(&lldev->lock, irqflags);
        writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
        spin_unlock_irqrestore(&lldev->lock, irqflags);
}

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
        u32 val;

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        lldev->trch_state = HIDMA_CH_STATE(val);
        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        lldev->evch_state = HIDMA_CH_STATE(val);

        /* the device counts as enabled only when both channels are enabled */
        if (hidma_is_chan_enabled(lldev->trch_state) &&
            hidma_is_chan_enabled(lldev->evch_state))
                return true;

        return false;
}

void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
        struct hidma_tre *tre;
        unsigned long flags;

        tre = &lldev->trepool[tre_ch];

        /* copy the TRE into its location in the TRE ring */
        spin_lock_irqsave(&lldev->lock, flags);
        tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
        lldev->pending_tre_list[tre->tre_index] = tre;
        memcpy(lldev->tre_ring + lldev->tre_write_offset,
                        &tre->tre_local[0], HIDMA_TRE_SIZE);
        tre->err_code = 0;
        tre->err_info = 0;
        tre->queued = 1;
        atomic_inc(&lldev->pending_tre_count);
        lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
                                        % lldev->tre_ring_size;
        spin_unlock_irqrestore(&lldev->lock, flags);
}
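
/*
 * Queueing and the doorbell are decoupled on purpose: a caller may queue
 * several TREs back to back and then call hidma_ll_start() once, so the
 * hardware sees a single doorbell write for the whole batch. This mirrors
 * how a dmaengine issue_pending path is expected to drive this layer (an
 * assumption about the caller, not something enforced here).
 */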

/*
 * Note that even though we stop this channel, a transaction already in
 * flight will still complete and invoke its callback. This request only
 * prevents further submissions from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
        u32 val;
        int ret;

        /* The channel needs to be in working state */
        if (!hidma_ll_isenabled(lldev))
                return 0;

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_SUSPEND << 16;
        writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

        /*
         * Wait for the suspend to be confirmed: poll the channel state
         * every 1ms, up to a 10ms maximum.
         */
        ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
                                 1000, 10000);
        if (ret)
                return ret;

        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_SUSPEND << 16;
        writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

        /*
         * Wait for the suspend to be confirmed: poll the channel state
         * every 1ms, up to a 10ms maximum.
         */
        ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
                                 1000, 10000);
        if (ret)
                return ret;

        lldev->trch_state = HIDMA_CH_SUSPENDED;
        lldev->evch_state = HIDMA_CH_SUSPENDED;

        /* disable interrupts */
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
        return 0;
}
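
/*
 * hidma_ll_disable() and hidma_ll_enable() form a suspend/resume pair; a
 * pause path could look like this (an illustrative sketch, not the actual
 * dmaengine callbacks):
 *
 *      if (!hidma_ll_disable(lldev))
 *              dev_dbg(lldev->dev, "channel suspended\n");
 */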

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
                                  dma_addr_t src, dma_addr_t dest, u32 len,
                                  u32 flags, u32 txntype)
{
        struct hidma_tre *tre;
        u32 *tre_local;

        if (tre_ch >= lldev->nr_tres) {
                dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
                        tre_ch);
                return;
        }

        tre = &lldev->trepool[tre_ch];
        if (atomic_read(&tre->allocated) != true) {
                dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
                        tre_ch);
                return;
        }

        tre_local = &tre->tre_local[0];
        tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
        tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
        tre_local[HIDMA_TRE_LEN_IDX] = len;
        tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
        tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
        tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
        tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
        tre->int_flags = flags;
}
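
/*
 * As programmed above, the cached TRE words end up laid out as: word 0
 * holds the config (transfer type in bits 7:0, channel index in bits 15:8,
 * IEOB in bit 16), word 1 the length, words 2-3 the 64-bit source address
 * and words 4-5 the 64-bit destination address, low word first.
 */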

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
        int rc;
        u64 addr;
        u32 val;
        u32 nr_tres = lldev->nr_tres;

        atomic_set(&lldev->pending_tre_count, 0);
        lldev->tre_processed_off = 0;
        lldev->evre_processed_off = 0;
        lldev->tre_write_offset = 0;

        /* disable interrupts */
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

        /* clear all pending interrupts */
        val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

        rc = hidma_ll_reset(lldev);
        if (rc)
                return rc;

        /*
         * Clear all pending interrupts again.
         * Otherwise, we observe reset complete interrupts.
         */
        val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

        /* disable interrupts again after reset */
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

        addr = lldev->tre_dma;
        writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
        writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
        writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

        addr = lldev->evre_dma;
        writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
        writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
        writel(HIDMA_EVRE_SIZE * nr_tres,
                        lldev->evca + HIDMA_EVCA_RING_LEN_REG);

        /* configure interrupts */
        hidma_ll_setup_irq(lldev, lldev->msi_support);

        return hidma_ll_enable(lldev);
}

void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{
        u32 val;

        lldev->msi_support = msi;

        /* disable and clear interrupts before reconfiguring */
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

        /* route events to the wired IRQ line unless MSI is in use */
        val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
        val &= ~0xF;
        if (!lldev->msi_support)
                val = val | 0x1;
        writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

        /* clear all pending interrupts and enable them */
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}

struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
                                  void __iomem *trca, void __iomem *evca,
                                  u8 chidx)
{
        u32 required_bytes;
        struct hidma_lldev *lldev;
        int rc;
        size_t sz;

        if (!trca || !evca || !dev || !nr_tres)
                return NULL;

        /* need at least four TREs */
        if (nr_tres < 4)
                return NULL;

        /* need an extra space */
        nr_tres += 1;

        lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
        if (!lldev)
                return NULL;

        lldev->evca = evca;
        lldev->trca = trca;
        lldev->dev = dev;
        sz = sizeof(struct hidma_tre);
        lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
        if (!lldev->trepool)
                return NULL;

        required_bytes = sizeof(lldev->pending_tre_list[0]);
        lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
                                               GFP_KERNEL);
        if (!lldev->pending_tre_list)
                return NULL;

        sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
        lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
                                              GFP_KERNEL);
        if (!lldev->tre_ring)
                return NULL;

        memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
        lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
        lldev->nr_tres = nr_tres;

        /* the TRE ring has to be TRE_SIZE aligned */
        if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
                u8 tre_ring_shift;

                tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
                tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
                lldev->tre_dma += tre_ring_shift;
                lldev->tre_ring += tre_ring_shift;
        }

        sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
        lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
                                               GFP_KERNEL);
        if (!lldev->evre_ring)
                return NULL;

        memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
        lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

        /* the EVRE ring has to be EVRE_SIZE aligned */
        if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
                u8 evre_ring_shift;

                evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
                evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
                lldev->evre_dma += evre_ring_shift;
                lldev->evre_ring += evre_ring_shift;
        }
        lldev->chidx = chidx;

        sz = nr_tres * sizeof(struct hidma_tre *);
        rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
        if (rc)
                return NULL;

        rc = hidma_ll_setup(lldev);
        if (rc)
                return NULL;

        spin_lock_init(&lldev->lock);
        tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
        lldev->initialized = 1;
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
        return lldev;
}
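
/*
 * Probe-time usage sketch (illustrative only; trca/evca stand for the
 * ioremapped channel register regions and nr_tres for the descriptor count
 * chosen by the caller):
 *
 *      lldev = hidma_ll_init(dev, nr_tres, trca, evca, chidx);
 *      if (!lldev)
 *              return -ENOMEM;
 *      ...
 *      hidma_ll_uninit(lldev);
 */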

int hidma_ll_uninit(struct hidma_lldev *lldev)
{
        u32 required_bytes;
        int rc = 0;
        u32 val;

        if (!lldev)
                return -ENODEV;

        if (!lldev->initialized)
                return 0;

        lldev->initialized = 0;

        required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
        tasklet_kill(&lldev->task);
        memset(lldev->trepool, 0, required_bytes);
        lldev->trepool = NULL;
        atomic_set(&lldev->pending_tre_count, 0);
        lldev->tre_write_offset = 0;

        rc = hidma_ll_reset(lldev);

        /*
         * Clear all pending interrupts again.
         * Otherwise, we observe reset complete interrupts.
         */
        val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
        return rc;
}

enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
        enum dma_status ret = DMA_ERROR;
        struct hidma_tre *tre;
        unsigned long flags;
        u8 err_code;

        spin_lock_irqsave(&lldev->lock, flags);

        tre = &lldev->trepool[tre_ch];
        err_code = tre->err_code;

        if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
                ret = DMA_COMPLETE;
        else if (err_code & HIDMA_EVRE_STATUS_ERROR)
                ret = DMA_ERROR;
        else
                ret = DMA_IN_PROGRESS;
        spin_unlock_irqrestore(&lldev->lock, flags);

        return ret;
}