linux/drivers/staging/kpc2000/kpc_dma/dma.c
// SPDX-License-Identifier: GPL-2.0+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include "kpc_dma_driver.h"

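/*
 * Each DMA engine owns a circular, singly linked ring of hardware
 * descriptors allocated from a dma_pool.  desc_next points at the
 * descriptor software will fill next, and desc_completed at the last
 * descriptor the hardware is known to have finished.  The hard IRQ
 * handler only schedules irq_work; ndd_irq_worker() then walks the ring
 * from desc_completed up to the engine's completed pointer and reports
 * finished transfers through transfer_complete_cb().
 */
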
/**********  IRQ Handlers  **********/
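/* Hard IRQ handler: defer all real work to ndd_irq_worker() via irq_work. */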
static
irqreturn_t  ndd_irq_handler(int irq, void *dev_id)
{
        struct kpc_dma_device *ldev = (struct kpc_dma_device *)dev_id;

        if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) ||
            (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
                schedule_work(&ldev->irq_work);

        return IRQ_HANDLED;
}

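/*
 * Work handler: under the engine lock, advance desc_completed through every
 * descriptor the hardware has finished, accumulating byte counts and
 * error/short flags, and invoke transfer_complete_cb() for each EOP
 * descriptor that has an acd attached.
 */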
static
void  ndd_irq_worker(struct work_struct *ws)
{
        struct kpc_dma_descriptor *cur;
        struct kpc_dma_device *eng = container_of(ws, struct kpc_dma_device, irq_work);

        lock_engine(eng);

        if (GetEngineCompletePtr(eng) == 0)
                goto out;

        if (eng->desc_completed->MyDMAAddr == GetEngineCompletePtr(eng))
                goto out;

        cur = eng->desc_completed;
        do {
                cur = cur->Next;
                dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n",
                        cur, cur->acd);
                BUG_ON(cur == eng->desc_next); // Ordering failure.

                if (cur->DescControlFlags & DMA_DESC_CTL_SOP) {
                        eng->accumulated_bytes = 0;
                        eng->accumulated_flags = 0;
                }

                eng->accumulated_bytes += cur->DescByteCount;
                if (cur->DescStatusFlags & DMA_DESC_STS_ERROR)
                        eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_ERROR;

                if (cur->DescStatusFlags & DMA_DESC_STS_SHORT)
                        eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_SHORT;

                if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
                        if (cur->acd)
                                transfer_complete_cb(cur->acd, eng->accumulated_bytes,
                                                     eng->accumulated_flags | ACD_FLAG_DONE);
                }

                eng->desc_completed = cur;
        } while (cur->MyDMAAddr != GetEngineCompletePtr(eng));

 out:
        SetClearEngineControl(eng, ENG_CTL_IRQ_ACTIVE, 0);

        unlock_engine(eng);
}

/**********  DMA Engine Init/Teardown  **********/
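/*
 * Reset the software ring pointers (desc_next to the first descriptor,
 * desc_completed to the last), program the matching hardware pointer
 * registers, and enable the engine with interrupts on.
 */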
void  start_dma_engine(struct kpc_dma_device *eng)
{
        eng->desc_next       = eng->desc_pool_first;
        eng->desc_completed  = eng->desc_pool_last;

        // Set up the engine pointer registers
        SetEngineNextPtr(eng, eng->desc_pool_first);
        SetEngineSWPtr(eng, eng->desc_pool_first);
        ClearEngineCompletePtr(eng);

        WriteEngineControl(eng, ENG_CTL_DMA_ENABLE | ENG_CTL_IRQ_ENABLE);
}

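/*
 * Allocate desc_cnt descriptors from a DMA pool, chain them into a circular
 * list (via both CPU pointers and bus addresses for the hardware), register
 * the shared IRQ handler, and start the engine.
 */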
int  setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
{
        u32 caps;
        struct kpc_dma_descriptor *cur;
        struct kpc_dma_descriptor *next;
        dma_addr_t next_handle;
        dma_addr_t head_handle;
        unsigned int i;
        int rv;

        caps = GetEngineCapabilities(eng);

        if (WARN(!(caps & ENG_CAP_PRESENT),
                 "%s() called for DMA Engine at %p which isn't present in hardware!\n",
                 __func__, eng))
                return -ENXIO;

        if (caps & ENG_CAP_DIRECTION)
                eng->dir = DMA_FROM_DEVICE;
        else
                eng->dir = DMA_TO_DEVICE;

        eng->desc_pool_cnt = desc_cnt;
        eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev,
                                         sizeof(struct kpc_dma_descriptor),
                                         DMA_DESC_ALIGNMENT, 4096);
        if (!eng->desc_pool) {
                dev_err(&eng->pldev->dev, "%s: couldn't create desc_pool!\n", __func__);
                return -ENOMEM;
        }

        eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle);
        if (!eng->desc_pool_first) {
                dev_err(&eng->pldev->dev, "%s: couldn't allocate desc_pool_first!\n", __func__);
                dma_pool_destroy(eng->desc_pool);
                return -ENOMEM;
        }

        eng->desc_pool_first->MyDMAAddr = head_handle;
        clear_desc(eng->desc_pool_first);

        cur = eng->desc_pool_first;
        for (i = 1; i < eng->desc_pool_cnt; i++) {
                next = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &next_handle);
                if (!next)
                        goto done_alloc;

                clear_desc(next);
                next->MyDMAAddr = next_handle;

                cur->DescNextDescPtr = next_handle;
                cur->Next = next;
                cur = next;
        }

 done_alloc:
        // Link the last descriptor back to the first, so it's a circular linked list
        cur->Next = eng->desc_pool_first;
        cur->DescNextDescPtr = eng->desc_pool_first->MyDMAAddr;

        eng->desc_pool_last = cur;
        eng->desc_completed = eng->desc_pool_last;

        // Set up work queue
        INIT_WORK(&eng->irq_work, ndd_irq_worker);

        // Grab IRQ line
        rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED, KP_DRIVER_NAME_DMA_CONTROLLER, eng);
        if (rv) {
                dev_err(&eng->pldev->dev, "%s: failed to request_irq: %d\n", __func__, rv);
                return rv;
        }

        // Turn on the engine!
        start_dma_engine(eng);
        unlock_engine(eng);

        return 0;
}

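/*
 * Quiesce the engine: disable it, wait (bounded) for the current operation
 * to finish, request and then perform a reset, and finally clear any status
 * bits that persist across the reset.
 */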
void  stop_dma_engine(struct kpc_dma_device *eng)
{
        unsigned long timeout;

        // Disable the descriptor engine
        WriteEngineControl(eng, 0);

        // Wait for the descriptor engine to finish its current operation
        timeout = jiffies + (HZ / 2);
        while (GetEngineControl(eng) & ENG_CTL_DMA_RUNNING) {
                if (time_after(jiffies, timeout)) {
                        dev_crit(&eng->pldev->dev, "DMA_RUNNING still asserted!\n");
                        break;
                }
        }

        // Request a reset
        WriteEngineControl(eng, ENG_CTL_DMA_RESET_REQUEST);

        // Wait for the reset request to be processed
        timeout = jiffies + (HZ / 2);
        while (GetEngineControl(eng) & (ENG_CTL_DMA_RUNNING | ENG_CTL_DMA_RESET_REQUEST)) {
                if (time_after(jiffies, timeout)) {
                        dev_crit(&eng->pldev->dev, "ENG_CTL_DMA_RESET_REQUEST still asserted!\n");
                        break;
                }
        }

        // Perform the reset
        WriteEngineControl(eng, ENG_CTL_DMA_RESET);

        // And wait for the reset to complete
        timeout = jiffies + (HZ / 2);
        while (GetEngineControl(eng) & ENG_CTL_DMA_RESET) {
                if (time_after(jiffies, timeout)) {
                        dev_crit(&eng->pldev->dev, "DMA_RESET still asserted!\n");
                        break;
                }
        }

        // Clear any persistent bits just to make sure there is no residue from the reset
        SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE |
                                    ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR |
                                    ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END |
                                    ENG_CTL_DMA_WAITING_PERSIST), 0);

        // Reset performance counters

        // Completely disable the engine
        WriteEngineControl(eng, 0);
}

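/*
 * Tear down an engine set up by setup_dma_engine(): stop the hardware, walk
 * the descriptor ring freeing each descriptor back to the pool, destroy the
 * pool, and release the IRQ.
 */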
void  destroy_dma_engine(struct kpc_dma_device *eng)
{
        struct kpc_dma_descriptor *cur;
        dma_addr_t cur_handle;
        unsigned int i;

        stop_dma_engine(eng);

        cur = eng->desc_pool_first;
        cur_handle = eng->desc_pool_first->MyDMAAddr;

        for (i = 0; i < eng->desc_pool_cnt; i++) {
                struct kpc_dma_descriptor *next = cur->Next;
                dma_addr_t next_handle = cur->DescNextDescPtr;

                dma_pool_free(eng->desc_pool, cur, cur_handle);
                cur_handle = next_handle;
                cur = next;
        }

        dma_pool_destroy(eng->desc_pool);

        free_irq(eng->irq, eng);
}

/**********  Helper Functions  **********/
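/*
 * Number of ring descriptors between desc_next and desc_completed, i.e. how
 * many slots are currently free for software to fill with new transfers.
 */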
int  count_descriptors_available(struct kpc_dma_device *eng)
{
        u32 count = 0;
        struct kpc_dma_descriptor *cur = eng->desc_next;

        while (cur != eng->desc_completed) {
                BUG_ON(!cur);
                count++;
                cur = cur->Next;
        }
        return count;
}

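/* Zero every hardware-visible field of a descriptor and detach its acd. */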
void  clear_desc(struct kpc_dma_descriptor *desc)
{
        if (!desc)
                return;
        desc->DescByteCount         = 0;
        desc->DescStatusErrorFlags  = 0;
        desc->DescStatusFlags       = 0;
        desc->DescUserControlLS     = 0;
        desc->DescUserControlMS     = 0;
        desc->DescCardAddrLS        = 0;
        desc->DescBufferByteCount   = 0;
        desc->DescCardAddrMS        = 0;
        desc->DescControlFlags      = 0;
        desc->DescSystemAddrLS      = 0;
        desc->DescSystemAddrMS      = 0;
        desc->acd = NULL;
}