linux/drivers/dma-buf/dma-fence-array.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *      Gustavo Padovan <gustavo@padovan.org>
 *      Christian König <christian.koenig@amd.com>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

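/*
 * Sentinel parked in the base fence's error field until the array signals:
 * the first real fence error replaces it via a single cmpxchg(), and it is
 * cleared back to zero if no fence ever reported an error.
 */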
#define PENDING_ERROR 1

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
        return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
        return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
                                              int error)
{
        /*
         * Propagate the first error reported by any of our fences, but only
         * before we ourselves are signaled.
         */
        if (error)
                cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
        /* Clear the error flag if not actually set. */
        cmpxchg(&array->base.error, PENDING_ERROR, 0);
}

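/*
 * Deferred completion handler: runs from irq_work context once the last
 * pending fence has signaled, so the array is signaled outside the final
 * fence's callback path. Drops the reference handed over by
 * dma_fence_array_cb_func().
 */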
static void irq_dma_fence_array_work(struct irq_work *wrk)
{
        struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

        dma_fence_array_clear_pending_error(array);

        dma_fence_signal(&array->base);
        dma_fence_put(&array->base);
}

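/*
 * Per-fence completion callback: latch any error from the signaled fence
 * and, when the count of still-pending fences drops to zero, hand off to
 * irq_work to signal the array. Each invocation consumes one of the
 * references taken in dma_fence_array_enable_signaling(); the last one is
 * passed on to, and dropped by, the irq_work handler.
 */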
static void dma_fence_array_cb_func(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct dma_fence_array_cb *array_cb =
                container_of(cb, struct dma_fence_array_cb, cb);
        struct dma_fence_array *array = array_cb->array;

        dma_fence_array_set_pending_error(array, f->error);

        if (atomic_dec_and_test(&array->num_pending))
                irq_work_queue(&array->work);
        else
                dma_fence_put(&array->base);
}

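/*
 * Called once by the dma_fence core when the first waiter shows up:
 * install a callback on every fence in the array. Returning false tells
 * the core that enough fences (all, or any one with signal_on_any) had
 * already signaled, so the array can be reported as signaled right away.
 */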
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);
        struct dma_fence_array_cb *cb = (void *)(&array[1]);
        unsigned int i;

        for (i = 0; i < array->num_fences; ++i) {
                cb[i].array = array;
                /*
                 * As we may report that the fence is signaled before all
                 * callbacks are complete, we need to take an additional
                 * reference count on the array so that we do not free it too
                 * early. The core fence handling will only hold the reference
                 * until we signal the array as complete (but that is now
                 * insufficient).
                 */
                dma_fence_get(&array->base);
                if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
                                           dma_fence_array_cb_func)) {
                        int error = array->fences[i]->error;

                        dma_fence_array_set_pending_error(array, error);
                        dma_fence_put(&array->base);
                        if (atomic_dec_and_test(&array->num_pending)) {
                                dma_fence_array_clear_pending_error(array);
                                return false;
                        }
                }
        }

        return true;
}

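/*
 * Fast-path check used by the core: the array counts as signaled once
 * num_pending has dropped to zero; any leftover PENDING_ERROR sentinel is
 * cleared before reporting true.
 */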
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);

        if (atomic_read(&array->num_pending) > 0)
                return false;

        dma_fence_array_clear_pending_error(array);
        return true;
}

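/*
 * Final teardown once the last reference to the array fence is gone: drop
 * the reference held on every merged fence, then free the fences array the
 * caller handed over to dma_fence_array_create().
 */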
static void dma_fence_array_release(struct dma_fence *fence)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);
        unsigned int i;

        for (i = 0; i < array->num_fences; ++i)
                dma_fence_put(array->fences[i]);

        kfree(array->fences);
        dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_array_ops = {
        .get_driver_name = dma_fence_array_get_driver_name,
        .get_timeline_name = dma_fence_array_get_timeline_name,
        .enable_signaling = dma_fence_array_enable_signaling,
        .signaled = dma_fence_array_signaled,
        .release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:         [in]    number of fences to add to the array
 * @fences:             [in]    array containing the fences
 * @context:            [in]    fence context to use
 * @seqno:              [in]    sequence number to use
 * @signal_on_any:      [in]    signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init(). Returns NULL on allocation failure.
 *
 * The caller should allocate an array of num_fences fences and fill it with
 * the fences it wants to add to the object. Ownership of this array is
 * transferred to the dma_fence_array: dma_fence_put() is called on each fence
 * and the array itself is freed when the dma_fence_array is released.
 *
 * If @signal_on_any is true the fence array signals as soon as any fence in
 * the array signals, otherwise it signals only once all fences in the array
 * have signaled.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
                                               struct dma_fence **fences,
                                               u64 context, unsigned int seqno,
                                               bool signal_on_any)
{
        struct dma_fence_array *array;
        size_t size = sizeof(*array);

        /* Allocate the callback structures behind the array. */
        size += num_fences * sizeof(struct dma_fence_array_cb);
        array = kzalloc(size, GFP_KERNEL);
        if (!array)
                return NULL;

        spin_lock_init(&array->lock);
        dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
                       context, seqno);
        init_irq_work(&array->work, irq_dma_fence_array_work);

        array->num_fences = num_fences;
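        /*
         * With signal_on_any a single completed fence is enough to signal
         * the array; otherwise every fence has to complete first.
         */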
        atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
        array->fences = fences;

        array->base.error = PENDING_ERROR;

        return array;
}
EXPORT_SYMBOL(dma_fence_array_create);

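/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * might merge two fences into a single fence with dma_fence_array_create().
 * The helper name example_merge_fences() is hypothetical; the dma-buf and
 * slab calls are real APIs.
 *
 *      static struct dma_fence *example_merge_fences(struct dma_fence *a,
 *                                                    struct dma_fence *b)
 *      {
 *              struct dma_fence_array *array;
 *              struct dma_fence **fences;
 *
 *              fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *              if (!fences)
 *                      return NULL;
 *
 *              fences[0] = dma_fence_get(a);
 *              fences[1] = dma_fence_get(b);
 *
 *              array = dma_fence_array_create(2, fences,
 *                                             dma_fence_context_alloc(1), 1,
 *                                             false);
 *              if (!array) {
 *                      dma_fence_put(a);
 *                      dma_fence_put(b);
 *                      kfree(fences);
 *                      return NULL;
 *              }
 *
 *              return &array->base;
 *      }
 */
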
/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:              [in]    fence or fence array
 * @context:            [in]    fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);
        unsigned int i;

        if (!dma_fence_is_array(fence))
                return fence->context == context;

        for (i = 0; i < array->num_fences; i++) {
                if (array->fences[i]->context != context)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
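
/*
 * Illustrative, hypothetical caller: dma_fence_match_context() lets a
 * driver skip waiting on a fence (or fence array) whose work all comes
 * from its own timeline; my_ring_context here is an assumed per-driver
 * context value.
 *
 *      if (dma_fence_match_context(fence, my_ring_context))
 *              return 0;
 */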