/* linux/include/linux/dma-fence-chain.h */
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * fence-chain: chain fences together in a timeline
   4 *
   5 * Copyright (C) 2018 Advanced Micro Devices, Inc.
   6 * Authors:
   7 *      Christian König <christian.koenig@amd.com>
   8 */
   9
  10#ifndef __LINUX_DMA_FENCE_CHAIN_H
  11#define __LINUX_DMA_FENCE_CHAIN_H
  12
  13#include <linux/dma-fence.h>
  14#include <linux/irq_work.h>
  15#include <linux/slab.h>
  16
/**
 * struct dma_fence_chain - fence to represent a node of a fence chain
 * @base: fence base class
 * @prev: previous fence of the chain
 * @prev_seqno: original previous seqno before garbage collection
 * @fence: encapsulated fence
 * @lock: spinlock for fence handling
 */
struct dma_fence_chain {
	struct dma_fence base;
	struct dma_fence __rcu *prev;
	u64 prev_seqno;
	struct dma_fence *fence;
	/*
	 * @cb and @work share storage: a chain node is only ever waiting
	 * via the fence callback or pending in irq work, never both.
	 */
	union {
		/**
		 * @cb: callback for signaling
		 *
		 * This is used to add the callback for signaling the
		 * completion of the fence chain. Never used at the same time
		 * as the irq work.
		 */
		struct dma_fence_cb cb;

		/**
		 * @work: irq work item for signaling
		 *
		 * Irq work structure to allow us to add the callback without
		 * running into lock inversion. Never used at the same time as
		 * the callback.
		 */
		struct irq_work work;
	};
	spinlock_t lock;
};
  51
  52extern const struct dma_fence_ops dma_fence_chain_ops;
  53
  54/**
  55 * to_dma_fence_chain - cast a fence to a dma_fence_chain
  56 * @fence: fence to cast to a dma_fence_array
  57 *
  58 * Returns NULL if the fence is not a dma_fence_chain,
  59 * or the dma_fence_chain otherwise.
  60 */
  61static inline struct dma_fence_chain *
  62to_dma_fence_chain(struct dma_fence *fence)
  63{
  64        if (!fence || fence->ops != &dma_fence_chain_ops)
  65                return NULL;
  66
  67        return container_of(fence, struct dma_fence_chain, base);
  68}
  69
  70/**
  71 * dma_fence_chain_alloc
  72 *
  73 * Returns a new struct dma_fence_chain object or NULL on failure.
  74 */
  75static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
  76{
  77        return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
  78};
  79
  80/**
  81 * dma_fence_chain_free
  82 * @chain: chain node to free
  83 *
  84 * Frees up an allocated but not used struct dma_fence_chain object. This
  85 * doesn't need an RCU grace period since the fence was never initialized nor
  86 * published. After dma_fence_chain_init() has been called the fence must be
  87 * released by calling dma_fence_put(), and not through this function.
  88 */
  89static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
  90{
  91        kfree(chain);
  92};
  93
  94/**
  95 * dma_fence_chain_for_each - iterate over all fences in chain
  96 * @iter: current fence
  97 * @head: starting point
  98 *
  99 * Iterate over all fences in the chain. We keep a reference to the current
 100 * fence while inside the loop which must be dropped when breaking out.
 101 */
 102#define dma_fence_chain_for_each(iter, head)    \
 103        for (iter = dma_fence_get(head); iter; \
 104             iter = dma_fence_chain_walk(iter))
 105
 106struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence);
 107int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno);
 108void dma_fence_chain_init(struct dma_fence_chain *chain,
 109                          struct dma_fence *prev,
 110                          struct dma_fence *fence,
 111                          uint64_t seqno);
 112
 113#endif /* __LINUX_DMA_FENCE_CHAIN_H */
 114