/* SPDX-License-Identifier: GPL-2.0
 *
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c and headers
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#ifndef SHDMA_BASE_H
#define SHDMA_BASE_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>

  21/**
  22 * shdma_pm_state - DMA channel PM state
  23 * SHDMA_PM_ESTABLISHED:        either idle or during data transfer
  24 * SHDMA_PM_BUSY:               during the transfer preparation, when we have to
  25 *                              drop the lock temporarily
  26 * SHDMA_PM_PENDING:    transfers pending
  27 */
  28enum shdma_pm_state {
  29        SHDMA_PM_ESTABLISHED,
  30        SHDMA_PM_BUSY,
  31        SHDMA_PM_PENDING,
  32};

struct device;

/*
 * Drivers, using this library are expected to embed struct shdma_dev,
 * struct shdma_chan, struct shdma_desc, and struct shdma_slave
 * in their respective device, channel, descriptor and slave objects.
 */

  42struct shdma_slave {
  43        int slave_id;
  44};
  46struct shdma_desc {
  47        struct list_head node;
  48        struct dma_async_tx_descriptor async_tx;
  49        enum dma_transfer_direction direction;
  50        size_t partial;
  51        dma_cookie_t cookie;
  52        int chunks;
  53        int mark;
  54        bool cyclic;                    /* used as cyclic transfer */
  55};
  57struct shdma_chan {
  58        spinlock_t chan_lock;           /* Channel operation lock */
  59        struct list_head ld_queue;      /* Link descriptors queue */
  60        struct list_head ld_free;       /* Free link descriptors */
  61        struct dma_chan dma_chan;       /* DMA channel */
  62        struct device *dev;             /* Channel device */
  63        void *desc;                     /* buffer for descriptor array */
  64        int desc_num;                   /* desc count */
  65        size_t max_xfer_len;            /* max transfer length */
  66        int id;                         /* Raw id of this channel */
  67        int irq;                        /* Channel IRQ */
  68        int slave_id;                   /* Client ID for slave DMA */
  69        int real_slave_id;              /* argument passed to filter function */
  70        int hw_req;                     /* DMA request line for slave DMA - same
  71                                         * as MID/RID, used with DT */
  72        enum shdma_pm_state pm_state;
  73};
  75/**
  76 * struct shdma_ops - simple DMA driver operations
  77 * desc_completed:      return true, if this is the descriptor, that just has
  78 *                      completed (atomic)
  79 * halt_channel:        stop DMA channel operation (atomic)
  80 * channel_busy:        return true, if the channel is busy (atomic)
  81 * slave_addr:          return slave DMA address
  82 * desc_setup:          set up the hardware specific descriptor portion (atomic)
  83 * set_slave:           bind channel to a slave
  84 * setup_xfer:          configure channel hardware for operation (atomic)
  85 * start_xfer:          start the DMA transfer (atomic)
  86 * embedded_desc:       return Nth struct shdma_desc pointer from the
  87 *                      descriptor array
  88 * chan_irq:            process channel IRQ, return true if a transfer has
  89 *                      completed (atomic)
  90 */
  91struct shdma_ops {
  92        bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
  93        void (*halt_channel)(struct shdma_chan *);
  94        bool (*channel_busy)(struct shdma_chan *);
  95        dma_addr_t (*slave_addr)(struct shdma_chan *);
  96        int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
  97                          dma_addr_t, dma_addr_t, size_t *);
  98        int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
  99        void (*setup_xfer)(struct shdma_chan *, int);
 100        void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 101        struct shdma_desc *(*embedded_desc)(void *, int);
 102        bool (*chan_irq)(struct shdma_chan *, int);
 103        size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
 104};
 106struct shdma_dev {
 107        struct dma_device dma_dev;
 108        struct shdma_chan **schan;
 109        const struct shdma_ops *ops;
 110        size_t desc_size;
 111};
 113#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
 114                                i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
 116int shdma_request_irq(struct shdma_chan *, int,
 117                           unsigned long, const char *);
 118bool shdma_reset(struct shdma_dev *sdev);
 119void shdma_chan_probe(struct shdma_dev *sdev,
 120                           struct shdma_chan *schan, int id);
 121void shdma_chan_remove(struct shdma_chan *schan);
 122int shdma_init(struct device *dev, struct shdma_dev *sdev,
 123                    int chan_num);
 124void shdma_cleanup(struct shdma_dev *sdev);
 125#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
 126bool shdma_chan_filter(struct dma_chan *chan, void *arg);
 127#else
 128static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
 129{
 130        return false;
 131}
 132#endif

#endif