linux/drivers/dma/hsu/hsu.c
/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA channel allocation:
 * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
 *    channels for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
 *    4/5 to port 2, and so on. (See the illustrative helpers after the
 *    includes below.)
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"

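/*
 * Editor's illustration (not part of the original driver): the two helpers
 * below merely restate the port-to-channel mapping described in the comment
 * at the top of the file. Their names are hypothetical and nothing in this
 * driver uses them.
 */
static inline unsigned int hsu_dma_example_tx_chan(unsigned int port)
{
        return 2 * port;        /* even channel: DMA Read, i.e. UART TX */
}

static inline unsigned int hsu_dma_example_rx_chan(unsigned int port)
{
        return 2 * port + 1;    /* odd channel: DMA Write, i.e. UART RX */
}
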
#define HSU_DMA_BUSWIDTHS                               \
        BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)       |       \
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)          |       \
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)         |       \
        BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)         |       \
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)         |       \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)         |       \
        BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
        hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
        u32 cr = HSU_CH_CR_CHA;

        if (hsuc->direction == DMA_MEM_TO_DEV)
                cr &= ~HSU_CH_CR_CHD;
        else if (hsuc->direction == DMA_DEV_TO_MEM)
                cr |= HSU_CH_CR_CHD;

        hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
        struct dma_slave_config *config = &hsuc->config;
        struct hsu_dma_desc *desc = hsuc->desc;
        u32 bsr = 0, mtsr = 0;  /* to shut the compiler up */
        u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
        unsigned int i, count;

        if (hsuc->direction == DMA_MEM_TO_DEV) {
                bsr = config->dst_maxburst;
                mtsr = config->src_addr_width;
        } else if (hsuc->direction == DMA_DEV_TO_MEM) {
                bsr = config->src_maxburst;
                mtsr = config->dst_addr_width;
        }

        hsu_chan_disable(hsuc);

        hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
        hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
        hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

        /* Set descriptors */
        count = desc->nents - desc->active;
        for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
                hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
                hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

                /* Prepare value for DCR */
                dcr |= HSU_CH_DCR_DESCA(i);
                dcr |= HSU_CH_DCR_CHTOI(i);     /* timeout bit, see HSU Errata 1 */

                desc->active++;
        }
        /* Only for the last descriptor in the chain */
        dcr |= HSU_CH_DCR_CHSOD(count - 1);
        dcr |= HSU_CH_DCR_CHDI(count - 1);

        hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

        hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
        hsu_chan_disable(hsuc);
        hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
        hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
        struct virt_dma_desc *vdesc;

        /* Get the next descriptor */
        vdesc = vchan_next_desc(&hsuc->vchan);
        if (!vdesc) {
                hsuc->desc = NULL;
                return;
        }

        list_del(&vdesc->node);
        hsuc->desc = to_hsu_dma_desc(vdesc);

        /* Start the channel with a new descriptor */
        hsu_dma_start_channel(hsuc);
}

static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
{
        unsigned long flags;
        u32 sr;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        sr = hsu_chan_readl(hsuc, HSU_CH_SR);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}

irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
{
        struct hsu_dma_chan *hsuc;
        struct hsu_dma_desc *desc;
        unsigned long flags;
        u32 sr;

        /* Sanity check */
        if (nr >= chip->hsu->nr_channels)
                return IRQ_NONE;

        hsuc = &chip->hsu->chan[nr];

        /*
         * Regardless of the situation, the IRQ status must be read in order
         * to clear it; there is a hardware bug, see Errata 5, HSD 2900918.
         */
        sr = hsu_dma_chan_get_sr(hsuc);
        if (!sr)
                return IRQ_NONE;

        /* Timeout IRQ: we need to wait a while, see Errata 2 */
        if (sr & HSU_CH_SR_DESCTO_ANY)
                udelay(2);

        sr &= ~HSU_CH_SR_DESCTO_ANY;
        if (!sr)
                return IRQ_HANDLED;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        desc = hsuc->desc;
        if (desc) {
                if (sr & HSU_CH_SR_CHE) {
                        desc->status = DMA_ERROR;
                } else if (desc->active < desc->nents) {
                        hsu_dma_start_channel(hsuc);
                } else {
                        vchan_cookie_complete(&desc->vdesc);
                        desc->status = DMA_COMPLETE;
                        hsu_dma_start_transfer(hsuc);
                }
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hsu_dma_irq);
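
/*
 * Editor's note, illustration only (not part of the original file):
 * hsu_dma_irq() is exported so that the platform/PCI glue can dispatch its
 * chip-level interrupt to the per-channel handler above. A caller would do
 * roughly the following; the status register offset and every name other
 * than hsu_dma_irq() are assumptions made for this sketch:
 *
 *      static irqreturn_t example_chip_irq(int irq, void *dev)
 *      {
 *              struct hsu_dma_chip *chip = dev;
 *              irqreturn_t ret = IRQ_NONE;
 *              unsigned short i;
 *              u32 status = readl(chip->regs + EXAMPLE_DMA_INT_STATUS);
 *
 *              for (i = 0; i < chip->hsu->nr_channels; i++)
 *                      if (status & BIT(i))
 *                              ret |= hsu_dma_irq(chip, i);
 *
 *              return ret;
 *      }
 */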

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
        struct hsu_dma_desc *desc;

        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
        if (!desc->sg) {
                kfree(desc);
                return NULL;
        }

        return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
        struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

        kfree(desc->sg);
        kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        struct hsu_dma_desc *desc;
        struct scatterlist *sg;
        unsigned int i;

        desc = hsu_dma_alloc_desc(sg_len);
        if (!desc)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                desc->sg[i].addr = sg_dma_address(sg);
                desc->sg[i].len = sg_dma_len(sg);

                desc->length += sg_dma_len(sg);
        }

        desc->nents = sg_len;
        desc->direction = direction;
        /* desc->active = 0 by kzalloc */
        desc->status = DMA_IN_PROGRESS;

        return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
                hsu_dma_start_transfer(hsuc);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
        struct hsu_dma_desc *desc = hsuc->desc;
        size_t bytes = 0;
        int i;

        for (i = desc->active; i < desc->nents; i++)
                bytes += desc->sg[i].len;

        i = HSU_DMA_CHAN_NR_DESC - 1;
        do {
                bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
        } while (--i >= 0);

        return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        size_t bytes;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, state);
        if (status == DMA_COMPLETE)
                return status;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        vdesc = vchan_find_desc(&hsuc->vchan, cookie);
        if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
                bytes = hsu_dma_active_desc_size(hsuc);
                dma_set_residue(state, bytes);
                status = hsuc->desc->status;
        } else if (vdesc) {
                bytes = to_hsu_dma_desc(vdesc)->length;
                dma_set_residue(state, bytes);
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
                                struct dma_slave_config *config)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

        /* Check if chan will be configured for slave transfers */
        if (!is_slave_direction(config->direction))
                return -EINVAL;

        memcpy(&hsuc->config, config, sizeof(hsuc->config));

        return 0;
}

static int hsu_dma_pause(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
                hsu_chan_disable(hsuc);
                hsuc->desc->status = DMA_PAUSED;
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
                hsuc->desc->status = DMA_IN_PROGRESS;
                hsu_chan_enable(hsuc);
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&hsuc->vchan.lock, flags);

        hsu_dma_stop_channel(hsuc);
        if (hsuc->desc) {
                hsu_dma_desc_free(&hsuc->desc->vdesc);
                hsuc->desc = NULL;
        }

        vchan_get_all_descriptors(&hsuc->vchan, &head);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
        vchan_dma_desc_free_list(&hsuc->vchan, &head);

        return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
        vchan_free_chan_resources(to_virt_chan(chan));
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
        struct hsu_dma *hsu;
        void __iomem *addr = chip->regs + chip->offset;
        unsigned short i;
        int ret;

        hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
        if (!hsu)
                return -ENOMEM;

        chip->hsu = hsu;

        /* Calculate nr_channels from the IO space length */
        hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

        hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
                                 sizeof(*hsu->chan), GFP_KERNEL);
        if (!hsu->chan)
                return -ENOMEM;

        INIT_LIST_HEAD(&hsu->dma.channels);
        for (i = 0; i < hsu->nr_channels; i++) {
                struct hsu_dma_chan *hsuc = &hsu->chan[i];

                hsuc->vchan.desc_free = hsu_dma_desc_free;
                vchan_init(&hsuc->vchan, &hsu->dma);

                hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
                hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
        }

        dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

        hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

        hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

        hsu->dma.device_issue_pending = hsu_dma_issue_pending;
        hsu->dma.device_tx_status = hsu_dma_tx_status;

        hsu->dma.device_config = hsu_dma_slave_config;
        hsu->dma.device_pause = hsu_dma_pause;
        hsu->dma.device_resume = hsu_dma_resume;
        hsu->dma.device_terminate_all = hsu_dma_terminate_all;

        hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
        hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
        hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        hsu->dma.dev = chip->dev;

        dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

        ret = dma_async_device_register(&hsu->dma);
        if (ret)
                return ret;

        dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
        return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);
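
/*
 * Editor's note, illustration only (not part of the original file): before
 * calling hsu_dma_probe(), the platform/PCI glue is expected to fill in the
 * chip descriptor with its MMIO base and extent, which probe uses above to
 * derive nr_channels. Roughly (resource handling and the pdev/mem/ret names
 * are assumptions made for this sketch):
 *
 *      chip->dev    = &pdev->dev;
 *      chip->regs   = devm_ioremap_resource(&pdev->dev, mem);
 *      chip->offset = 0;
 *      chip->length = resource_size(mem);
 *
 *      ret = hsu_dma_probe(chip);
 */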

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
        struct hsu_dma *hsu = chip->hsu;
        unsigned short i;

        dma_async_device_unregister(&hsu->dma);

        for (i = 0; i < hsu->nr_channels; i++) {
                struct hsu_dma_chan *hsuc = &hsu->chan[i];

                tasklet_kill(&hsuc->vchan.task);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");