linux/drivers/dma/hsu/hsu.c
/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA channel allocation:
 * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
 *    channels for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
 *    4/5 to port 2, and so on.
 */
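
/*
 * Illustrative sketch (not part of this driver): given the allocation rules
 * above, a hypothetical consumer could derive the channel pair serving a
 * given UART port as:
 *
 *	tx_chan = port * 2;		(even channel: DMA Read, UART TX)
 *	rx_chan = port * 2 + 1;		(odd channel: DMA Write, UART RX)
 *
 * The variable names here are made up for the example only.
 */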

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"

#define HSU_DMA_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->src_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->dst_addr_width;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set descriptors */
	count = desc->nents - desc->active;
	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

/*
 *	hsu_dma_get_status() - get DMA channel status
 *	@chip: HSUART DMA chip
 *	@nr: DMA channel number
 *	@status: pointer for DMA Channel Status Register value
 *
 *	Description:
 *	The function reads and clears the DMA Channel Status Register, checks
 *	if it was a timeout interrupt and returns a corresponding value.
 *
 *	Caller should provide a valid pointer for the DMA Channel Status
 *	Register value that will be returned in @status.
 *
 *	Return:
 *	1 for DMA timeout status, 0 for other DMA status, or error code for
 *	invalid parameters or no interrupt pending.
 */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
		       u32 *status)
{
	struct hsu_dma_chan *hsuc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return -EINVAL;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * Regardless of the situation, we must read and thereby clear the
	 * IRQ status (there is a hardware bug, see Errata 5, HSD 2900918).
	 */
	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	/* Check if any interrupt is pending */
	sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
	if (!sr)
		return -EIO;

	/* A timeout IRQ requires a short delay before proceeding, see Errata 2 */
	if (sr & HSU_CH_SR_DESCTO_ANY)
		udelay(2);

	/*
	 * At this point, at least one of Descriptor Time Out, Channel Error
	 * or Descriptor Done bits must be set. Clear the Descriptor Time Out
	 * bits and if sr is still non-zero, it must be channel error or
	 * descriptor done which are higher priority than timeout and handled
	 * in hsu_dma_do_irq(). Else, it must be a timeout.
	 */
	sr &= ~HSU_CH_SR_DESCTO_ANY;

	*status = sr;

	return sr ? 0 : 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_get_status);

/*
 *	hsu_dma_do_irq() - DMA interrupt handler
 *	@chip: HSUART DMA chip
 *	@nr: DMA channel number
 *	@status: Channel Status Register value
 *
 *	Description:
 *	This function handles Channel Error and Descriptor Done interrupts.
 *	It should be called only after determining that the DMA interrupt is
 *	not a normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
 *
 *	Return:
 *	0 for an invalid channel number, 1 otherwise.
 */
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return 0;

	hsuc = &chip->hsu->chan[nr];

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (status & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
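
/*
 * Usage sketch (illustrative, not part of this driver): per the comments
 * above, a serial driver's interrupt handler is expected to call
 * hsu_dma_get_status() first and forward the status to hsu_dma_do_irq()
 * only when no timeout was signalled, roughly as:
 *
 *	u32 status;
 *	int ret = hsu_dma_get_status(chip, nr, &status);
 *
 *	if (ret < 0)
 *		return;			(no interrupt pending on this channel)
 *	if (ret > 0)
 *		return;			(timeout, handled outside this core)
 *	hsu_dma_do_irq(chip, nr, status);
 *
 * The chip/nr variables are hypothetical here; the real callers live on the
 * serial driver side.
 */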

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);

		desc->length += sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	/* desc->active = 0 by kzalloc */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = 0;
	int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

	i = HSU_DMA_CHAN_NR_DESC - 1;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);

	return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = to_hsu_dma_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}
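
/*
 * Configuration sketch (illustrative, not part of this driver): a client
 * such as a UART driver fills in struct dma_slave_config before preparing
 * transfers. Based on hsu_dma_chan_start() above, for DMA_DEV_TO_MEM the
 * src_maxburst value ends up in HSU_CH_BSR and dst_addr_width in
 * HSU_CH_MTSR. The values below are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_maxburst = 16,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */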

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_chan_enable(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	if (hsuc->desc) {
		hsu_dma_desc_free(&hsuc->desc->vdesc);
		hsuc->desc = NULL;
	}

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	/* Calculate nr_channels from the IO space length */
	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);
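
/*
 * Usage sketch (illustrative, not part of this file): glue code such as a
 * PCI or platform driver is expected to fill in struct hsu_dma_chip and then
 * call hsu_dma_probe()/hsu_dma_remove(), roughly as below. The resource
 * handling shown is hypothetical and depends on the bus:
 *
 *	chip->dev = &pdev->dev;
 *	chip->regs = base;			(ioremapped register space)
 *	chip->offset = 0;
 *	chip->length = resource_size(res);
 *
 *	ret = hsu_dma_probe(chip);
 *	...
 *	hsu_dma_remove(chip);
 */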

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");