linux/drivers/dma/dmaengine.h
/*
 * The contents of this file are private to DMA engine drivers, and are not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
        chan->cookie = DMA_MIN_COOKIE;
        chan->completed_cookie = DMA_MIN_COOKIE;
}
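
/*
 * Example (illustrative sketch, not part of this header): drivers normally
 * call dma_cookie_init() once per channel while wiring the channel up,
 * before the DMA device is registered and can accept transfers.  The
 * struct my_chan wrapper and my_init_chan() below are hypothetical.
 *
 *        struct my_chan {
 *                struct dma_chan chan;
 *                spinlock_t lock;
 *                struct list_head pending;
 *        };
 *
 *        static void my_init_chan(struct my_chan *mc, struct dma_device *dd)
 *        {
 *                spin_lock_init(&mc->lock);
 *                INIT_LIST_HEAD(&mc->pending);
 *                dma_cookie_init(&mc->chan);
 *                mc->chan.device = dd;
 *                list_add_tail(&mc->chan.device_node, &dd->channels);
 *        }
 */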

/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *chan = tx->chan;
        dma_cookie_t cookie;

        cookie = chan->cookie + 1;
        if (cookie < DMA_MIN_COOKIE)
                cookie = DMA_MIN_COOKIE;
        tx->cookie = chan->cookie = cookie;

        return cookie;
}
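
/*
 * Example (illustrative sketch): dma_cookie_assign() is normally called from
 * a driver's ->tx_submit() hook with the channel lock held, so that cookie
 * assignment and queueing of the descriptor remain atomic.  struct my_desc,
 * to_my_chan() and to_my_desc() are hypothetical helpers, not part of this
 * header.
 *
 *        static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
 *        {
 *                struct my_chan *mc = to_my_chan(tx->chan);
 *                struct my_desc *d = to_my_desc(tx);
 *                dma_cookie_t cookie;
 *                unsigned long flags;
 *
 *                spin_lock_irqsave(&mc->lock, flags);
 *                cookie = dma_cookie_assign(tx);
 *                list_add_tail(&d->node, &mc->pending);
 *                spin_unlock_irqrestore(&mc->lock, flags);
 *
 *                return cookie;
 *        }
 */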

/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channel's completed
 * cookie marker.  Zero the descriptor's cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
        BUG_ON(tx->cookie < DMA_MIN_COOKIE);
        tx->chan->completed_cookie = tx->cookie;
        tx->cookie = 0;
}
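
/*
 * Example (illustrative sketch): dma_cookie_complete() is called from the
 * driver's completion path (interrupt handler or tasklet) with the channel
 * lock held, before the descriptor's callback is handled.  mc, d and flags
 * are the hypothetical driver state used in the earlier sketches.
 *
 *        spin_lock_irqsave(&mc->lock, flags);
 *        dma_cookie_complete(&d->tx);
 *        list_del(&d->node);
 *        spin_unlock_irqrestore(&mc->lock, flags);
 */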

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL.  No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        dma_cookie_t used, complete;

        used = chan->cookie;
        complete = chan->completed_cookie;
        barrier();
        if (state) {
                state->last = complete;
                state->used = used;
                state->residue = 0;
        }
        return dma_async_is_complete(cookie, complete, used);
}

/**
 * dma_set_residue - set the residue in the tx state
 * @state: dma_tx_state structure to fill in, may be NULL
 * @residue: number of bytes still outstanding for the transaction
 */
static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
        if (state)
                state->residue = residue;
}
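
/*
 * Example (illustrative sketch): a driver's ->device_tx_status() hook
 * usually starts with dma_cookie_status() and, if the transfer is still in
 * flight, queries the hardware for the remaining byte count and reports it
 * with dma_set_residue().  my_get_bytes_left() is a hypothetical helper.
 *
 *        static enum dma_status my_tx_status(struct dma_chan *chan,
 *                                            dma_cookie_t cookie,
 *                                            struct dma_tx_state *state)
 *        {
 *                struct my_chan *mc = to_my_chan(chan);
 *                enum dma_status ret;
 *
 *                ret = dma_cookie_status(chan, cookie, state);
 *                if (ret == DMA_COMPLETE)
 *                        return ret;
 *
 *                dma_set_residue(state, my_get_bytes_left(mc, cookie));
 *                return ret;
 *        }
 */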

/**
 * struct dmaengine_desc_callback - cached descriptor callback
 * @callback: completion callback copied from the descriptor
 * @callback_result: result-reporting callback copied from the descriptor
 * @callback_param: parameter passed to whichever callback is invoked
 */
struct dmaengine_desc_callback {
        dma_async_tx_callback callback;
        dma_async_tx_callback_result callback_result;
        void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed-in cb struct with what's available in the passed-in
 * tx descriptor struct.
 * No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
                            struct dmaengine_desc_callback *cb)
{
        cb->callback = tx->callback;
        cb->callback_result = tx->callback_result;
        cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
                               const struct dmaengine_result *result)
{
        struct dmaengine_result dummy_result = {
                .result = DMA_TRANS_NOERROR,
                .residue = 0
        };

        if (cb->callback_result) {
                if (!result)
                        result = &dummy_result;
                cb->callback_result(cb->callback_param, result);
        } else if (cb->callback) {
                cb->callback(cb->callback_param);
        }
}
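
/*
 * Example (illustrative sketch): the usual completion sequence snapshots the
 * callback with dmaengine_desc_get_callback() while the channel lock is
 * held, then drops the lock (after which the descriptor may be recycled)
 * before invoking it via dmaengine_desc_callback_invoke().  Passing a NULL
 * result is allowed; result-aware callbacks then see DMA_TRANS_NOERROR with
 * a residue of 0.
 *
 *        struct dmaengine_desc_callback cb;
 *
 *        spin_lock_irqsave(&mc->lock, flags);
 *        dma_cookie_complete(&d->tx);
 *        dmaengine_desc_get_callback(&d->tx, &cb);
 *        list_del(&d->node);
 *        spin_unlock_irqrestore(&mc->lock, flags);
 *
 *        dmaengine_desc_callback_invoke(&cb, NULL);
 */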

/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 *                                      then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
                                   const struct dmaengine_result *result)
{
        struct dmaengine_desc_callback cb;

        dmaengine_desc_get_callback(tx, &cb);
        dmaengine_desc_callback_invoke(&cb, result);
}
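
/*
 * Example (illustrative sketch): when no driver work is needed between
 * fetching and invoking the callback, the combined helper can be used
 * directly; here with a NULL result, so a result-aware callback sees
 * DMA_TRANS_NOERROR:
 *
 *        dmaengine_desc_get_callback_invoke(&d->tx, NULL);
 */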

/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return true when a callback has been set in cb, false otherwise.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
        return (cb->callback) ? true : false;
}
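
/*
 * Example (illustrative sketch): dmaengine_desc_callback_valid() lets a
 * driver skip needless work, e.g. only scheduling its completion tasklet
 * when a plain callback was supplied (note that this helper checks
 * cb->callback only):
 *
 *        dmaengine_desc_get_callback(&d->tx, &cb);
 *        if (dmaengine_desc_callback_valid(&cb))
 *                tasklet_schedule(&mc->tasklet);
 */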

#endif