linux/drivers/dma/virt-dma.h
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
        struct dma_async_tx_descriptor tx;
        /* protected by vc.lock */
        struct list_head node;
};

struct virt_dma_chan {
        struct dma_chan chan;
        struct tasklet_struct task;
        void (*desc_free)(struct virt_dma_desc *);

        spinlock_t lock;

        /* protected by vc.lock */
        struct list_head desc_allocated;
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;

        struct virt_dma_desc *cyclic;
        struct virt_dma_desc *vd_terminated;
};
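
/*
 * Usage sketch (illustrative, not part of this header; the foo_* names are
 * hypothetical): a driver embeds these structures in its own channel and
 * descriptor types, so container_of() can recover the driver state from the
 * generic dmaengine objects.
 *
 *      struct foo_desc {
 *              struct virt_dma_desc vd;
 *              dma_addr_t src, dst;
 *              size_t len;
 *      };
 *
 *      struct foo_chan {
 *              struct virt_dma_chan vc;
 *              struct foo_desc *cur;   // descriptor the hardware is running
 *              void __iomem *base;
 *      };
 *
 *      static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
 *      {
 *              return container_of(c, struct foo_chan, vc.chan);
 *      }
 */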

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
        return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
        struct virt_dma_desc *vd, unsigned long tx_flags)
{
        unsigned long flags;

        dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
        vd->tx.flags = tx_flags;
        vd->tx.tx_submit = vchan_tx_submit;
        vd->tx.desc_free = vchan_tx_desc_free;

        spin_lock_irqsave(&vc->lock, flags);
        list_add_tail(&vd->node, &vc->desc_allocated);
        spin_unlock_irqrestore(&vc->lock, flags);

        return &vd->tx;
}
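
/*
 * Usage sketch (illustrative; foo_* names as above, hypothetical): a driver's
 * prep callback allocates and fills its hardware-specific descriptor, then
 * hands it to vchan_tx_prep() to initialise the dmaengine fields and park it
 * on the desc_allocated list until submission.
 *
 *      static struct dma_async_tx_descriptor *
 *      foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *                          dma_addr_t src, size_t len, unsigned long flags)
 *      {
 *              struct foo_chan *fc = to_foo_chan(chan);
 *              struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *              if (!d)
 *                      return NULL;
 *              d->src = src;
 *              d->dst = dst;
 *              d->len = len;
 *              return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *      }
 */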

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
        list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
        return !list_empty(&vc->desc_issued);
}
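
/*
 * Usage sketch (illustrative): the driver's device_issue_pending callback
 * moves submitted work to the issued list under vc.lock and kicks the
 * hardware if it is idle; foo_start() is a hypothetical helper sketched
 * after vchan_next_desc() below.
 *
 *      static void foo_issue_pending(struct dma_chan *chan)
 *      {
 *              struct foo_chan *fc = to_foo_chan(chan);
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&fc->vc.lock, flags);
 *              if (vchan_issue_pending(&fc->vc) && !fc->cur)
 *                      foo_start(fc);
 *              spin_unlock_irqrestore(&fc->vc.lock, flags);
 *      }
 */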

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
        dma_cookie_t cookie;

        cookie = vd->tx.cookie;
        dma_cookie_complete(&vd->tx);
        dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
                 vd, cookie);
        list_add_tail(&vd->node, &vc->desc_completed);

        tasklet_schedule(&vc->task);
}
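
/*
 * Usage sketch (illustrative): the completion interrupt reports the finished
 * descriptor. Its node must already be off desc_issued (drivers typically
 * detach it when starting the transfer, see the vchan_next_desc() sketch
 * below), because vchan_cookie_complete() re-adds it to desc_completed.
 *
 *      static irqreturn_t foo_irq(int irq, void *data)
 *      {
 *              struct foo_chan *fc = data;
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&fc->vc.lock, flags);
 *              if (fc->cur) {
 *                      vchan_cookie_complete(&fc->cur->vd);
 *                      fc->cur = NULL;
 *                      foo_start(fc);  // begin the next issued descriptor, if any
 *              }
 *              spin_unlock_irqrestore(&fc->vc.lock, flags);
 *              return IRQ_HANDLED;
 *      }
 */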

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 *
 * vc.lock must be held by caller
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        if (dmaengine_desc_test_reuse(&vd->tx))
                list_add(&vd->node, &vc->desc_allocated);
        else
                vc->desc_free(vd);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        vc->cyclic = vd;
        tasklet_schedule(&vc->task);
}
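
/*
 * Usage sketch (illustrative): a cyclic descriptor stays active across
 * period interrupts, so the handler reports each completed period instead
 * of completing the cookie (assuming fc->cur is the running cyclic
 * descriptor):
 *
 *      spin_lock_irqsave(&fc->vc.lock, flags);
 *      if (fc->cur)
 *              vchan_cyclic_callback(&fc->cur->vd);
 *      spin_unlock_irqrestore(&fc->vc.lock, flags);
 */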

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        /* free up stuck descriptor */
        if (vc->vd_terminated)
                vchan_vdesc_fini(vc->vd_terminated);

        vc->vd_terminated = vd;
        if (vc->cyclic == vd)
                vc->cyclic = NULL;
}
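
/*
 * Usage sketch (illustrative): device_terminate_all halts the controller and
 * parks the in-flight descriptor via vchan_terminate_vdesc() instead of
 * freeing it directly, since its callback may still be scheduled; the
 * descriptor is released later by vchan_synchronize(). foo_stop() is a
 * hypothetical helper that halts the hardware.
 *
 *      spin_lock_irqsave(&fc->vc.lock, flags);
 *      foo_stop(fc);
 *      if (fc->cur) {
 *              vchan_terminate_vdesc(&fc->cur->vd);
 *              fc->cur = NULL;
 *      }
 *      spin_unlock_irqrestore(&fc->vc.lock, flags);
 */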

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
        return list_first_entry_or_null(&vc->desc_issued,
                                        struct virt_dma_desc, node);
}
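
/*
 * Usage sketch (illustrative): a start routine peeks at the head of the
 * issued list and detaches the node before programming the hardware, so
 * that vchan_cookie_complete() can later re-add it to desc_completed.
 *
 *      static void foo_start(struct foo_chan *fc)      // called with vc.lock held
 *      {
 *              struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);
 *
 *              if (!vd)
 *                      return;
 *              list_del(&vd->node);
 *              fc->cur = container_of(vd, struct foo_desc, vd);
 *              // program the controller from fc->cur->src/dst/len ...
 *      }
 */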

/**
 * vchan_get_all_descriptors - obtain all queued descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all descriptors from the allocated, submitted, issued and
 * completed internal lists, and provides the combined list via @head.
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
        struct list_head *head)
{
        list_splice_tail_init(&vc->desc_allocated, head);
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
}
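
/*
 * Usage sketch (illustrative): the tail of a device_terminate_all callback
 * collects everything still queued and frees it outside the lock:
 *
 *      LIST_HEAD(head);
 *
 *      spin_lock_irqsave(&fc->vc.lock, flags);
 *      // ... stop the hardware, vchan_terminate_vdesc() the active descriptor
 *      vchan_get_all_descriptors(&fc->vc, &head);
 *      spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *      vchan_dma_desc_free_list(&fc->vc, &head);
 */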

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
        struct virt_dma_desc *vd;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);
        vchan_get_all_descriptors(vc, &head);
        list_for_each_entry(vd, &head, node)
                dmaengine_desc_clear_reuse(&vd->tx);
        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 * Also frees any descriptor parked by vchan_terminate_vdesc(), to prevent a
 * memory leak.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
        unsigned long flags;

        tasklet_kill(&vc->task);

        spin_lock_irqsave(&vc->lock, flags);
        if (vc->vd_terminated) {
                vchan_vdesc_fini(vc->vd_terminated);
                vc->vd_terminated = NULL;
        }
        spin_unlock_irqrestore(&vc->lock, flags);
}
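
/*
 * Usage sketch (illustrative): a driver with no extra synchronization needs
 * can wire this helper straight into its struct dma_device:
 *
 *      static void foo_synchronize(struct dma_chan *chan)
 *      {
 *              vchan_synchronize(&to_foo_chan(chan)->vc);
 *      }
 *
 *      // at probe time (dd: hypothetical struct dma_device *):
 *      //      dd->device_synchronize = foo_synchronize;
 */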

#endif