/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef __LINUX_DMA_FENCE_H
#define __LINUX_DMA_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;

/**
 * struct dma_fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: dma_fence_ops associated with this fence
 * @rcu: used for releasing fence with kfree_rcu
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 *           dma_fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context;
 * can be compared to decide which fence would be signaled later.
 * @flags: A mask of DMA_FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @error: Optional, only valid if < 0, must be set before calling
 * dma_fence_signal, indicates that the fence has completed with an error.
 *
 * The flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * Since atomic bitops are used, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT being set
 * does not guarantee that enable_signaling has actually been called.
 * In particular, if dma_fence_signal ran right before that bit was set, it
 * could have set DMA_FENCE_FLAG_SIGNALED_BIT before enable_signaling was
 * called. Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
 * after dma_fence_signal was called, any enable_signaling call will have
 * either been completed, or never called at all.
 */
struct dma_fence {
        struct kref refcount;
        const struct dma_fence_ops *ops;
        struct rcu_head rcu;
        struct list_head cb_list;
        spinlock_t *lock;
        u64 context;
        unsigned seqno;
        unsigned long flags;
        ktime_t timestamp;
        int error;
};

enum dma_fence_flag_bits {
        DMA_FENCE_FLAG_SIGNALED_BIT,
        DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
        DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
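
/*
 * Example (illustrative sketch, not part of this header): the flags member
 * is manipulated with atomic bitops, and bits from DMA_FENCE_FLAG_USER_BITS
 * upward are free for driver use. A hypothetical driver-private bit could
 * be handled like this, without taking fence->lock:
 *
 *        #define MY_FENCE_FLAG_HW_QUEUED_BIT DMA_FENCE_FLAG_USER_BITS
 *
 *        // Mark the fence as queued to hardware; atomic, no lock needed.
 *        set_bit(MY_FENCE_FLAG_HW_QUEUED_BIT, &fence->flags);
 *
 *        // Later, check it just as locklessly.
 *        if (test_bit(MY_FENCE_FLAG_HW_QUEUED_BIT, &fence->flags))
 *                ; // ...
 */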

typedef void (*dma_fence_func_t)(struct dma_fence *fence,
                                 struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback
 * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback; additional
 * data can be passed along by embedding dma_fence_cb in another struct.
 */
struct dma_fence_cb {
        struct list_head node;
        dma_fence_func_t func;
};
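
/*
 * Example (illustrative sketch, not part of this header): since dma_fence_cb
 * is embeddable, a driver can recover its own state in the callback with
 * container_of(). The names below are hypothetical:
 *
 *        struct my_waiter {
 *                struct dma_fence_cb cb;
 *                struct completion done;
 *        };
 *
 *        static void my_fence_cb(struct dma_fence *fence,
 *                                struct dma_fence_cb *cb)
 *        {
 *                struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *                complete(&w->done);
 *        }
 */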

/**
 * struct dma_fence_ops - operations implemented for fence
 * @get_driver_name: returns the driver name.
 * @get_timeline_name: return the name of the context this fence belongs to.
 * @enable_signaling: enable software signaling of fence.
 * @signaled: [optional] peek whether the fence is signaled, can be null.
 * @wait: custom wait implementation, or dma_fence_default_wait.
 * @release: [optional] called on destruction of fence, can be null
 * @fill_driver_data: [optional] callback to fill in free-form debug info
 * Returns the number of bytes filled, or -errno.
 * @fence_value_str: [optional] fills in the value of the fence as a string
 * @timeline_value_str: [optional] fills in the current value of the timeline
 * as a string
 *
 * Notes on enable_signaling:
 * For fence implementations that have the capability for hw->hw
 * signaling, they can implement this op to enable the necessary
 * irqs, or insert commands into cmdstream, etc.  This is called
 * in the first wait() or add_callback() path to let the fence
 * implementation know that there is another driver waiting on
 * the signal (ie. hw->sw case).
 *
 * This function can be called from atomic context, but not
 * from irq context, so normal spinlocks can be used.
 *
 * A return value of false indicates the fence already passed,
 * or some failure occurred that made it impossible to enable
 * signaling. True indicates successful enabling.
 *
 * fence->error may be set in enable_signaling, but only when false is
 * returned.
 *
 * Calling dma_fence_signal before enable_signaling is called allows
 * for a tiny race window in which enable_signaling is called during,
 * before, or after dma_fence_signal. To close this race, it is recommended
 * that before enable_signaling returns true an extra reference is
 * taken on the fence, to be released when the fence is signaled.
 * This will mean dma_fence_signal will still be called twice, but
 * the second time will be a noop since it was already signaled.
 *
 * Notes on signaled:
 * May set fence->error if returning true.
 *
 * Notes on wait:
 * Must not be NULL, set to dma_fence_default_wait for default implementation.
 * The dma_fence_default_wait implementation should work for any fence, as long
 * as enable_signaling works correctly.
 *
 * Must return -ERESTARTSYS if intr is true and the wait was interrupted,
 * the remaining jiffies if the fence has signaled, or 0 if the wait timed
 * out. Custom implementations can also return other error values, which
 * should be treated as if the fence is signaled. For example a hardware
 * lockup could be reported like that.
 *
 * Notes on release:
 * Can be NULL, this function allows additional commands to run on
 * destruction of the fence. Can be called from irq context.
 * If pointer is set to NULL, kfree will get called instead.
 */
struct dma_fence_ops {
        const char * (*get_driver_name)(struct dma_fence *fence);
        const char * (*get_timeline_name)(struct dma_fence *fence);
        bool (*enable_signaling)(struct dma_fence *fence);
        bool (*signaled)(struct dma_fence *fence);
        signed long (*wait)(struct dma_fence *fence,
                            bool intr, signed long timeout);
        void (*release)(struct dma_fence *fence);

        int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
        void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
        void (*timeline_value_str)(struct dma_fence *fence,
                                   char *str, int size);
};
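
/*
 * Example (illustrative sketch, not part of this header): a minimal fence
 * implementation. It assumes a hypothetical driver whose irq handler calls
 * dma_fence_signal() on completed fences, so enable_signaling only has to
 * confirm the irq path is active:
 *
 *        static const char *my_get_driver_name(struct dma_fence *f)
 *        {
 *                return "my_driver";
 *        }
 *
 *        static const char *my_get_timeline_name(struct dma_fence *f)
 *        {
 *                return "my_ring_0";
 *        }
 *
 *        static bool my_enable_signaling(struct dma_fence *f)
 *        {
 *                return true;    // irqs always deliver completions
 *        }
 *
 *        static const struct dma_fence_ops my_fence_ops = {
 *                .get_driver_name = my_get_driver_name,
 *                .get_timeline_name = my_get_timeline_name,
 *                .enable_signaling = my_enable_signaling,
 *                .wait = dma_fence_default_wait,
 *        };
 *
 *        // once per ring: ring->context = dma_fence_context_alloc(1);
 *        dma_fence_init(&my_fence->base, &my_fence_ops, &ring->lock,
 *                       ring->context, ++ring->seqno);
 */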

void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
                    spinlock_t *lock, u64 context, unsigned seqno);

void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);

/**
 * dma_fence_put - decreases refcount of the fence
 * @fence:      [in]    fence to reduce refcount of
 */
static inline void dma_fence_put(struct dma_fence *fence)
{
        if (fence)
                kref_put(&fence->refcount, dma_fence_release);
}

/**
 * dma_fence_get - increases refcount of the fence
 * @fence:      [in]    fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
{
        if (fence)
                kref_get(&fence->refcount);
        return fence;
}

/**
 * dma_fence_get_rcu - get a fence from a reservation_object_list with
 *                     rcu read lock
 * @fence:      [in]    fence to increase refcount of
 *
 * Function returns NULL if no refcount could be obtained, or the fence.
 */
static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
{
        if (kref_get_unless_zero(&fence->refcount))
                return fence;
        else
                return NULL;
}

/**
 * dma_fence_get_rcu_safe  - acquire a reference to an RCU tracked fence
 * @fencep:     [in]    pointer to fence to increase refcount of
 *
 * Function returns NULL if no refcount could be obtained, or the fence.
 * This function handles acquiring a reference to a fence that may be
 * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
 * so long as the caller is using RCU on the pointer to the fence.
 *
 * An alternative mechanism is to employ a seqlock to protect a bunch of
 * fences, such as used by struct reservation_object. When using a seqlock,
 * the seqlock must be taken before and checked after a reference to the
 * fence is acquired (as shown here).
 *
 * The caller is required to hold the RCU read lock.
 */
static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
{
        do {
                struct dma_fence *fence;

                fence = rcu_dereference(*fencep);
                if (!fence || !dma_fence_get_rcu(fence))
                        return NULL;

                /* The atomic_inc_not_zero() inside dma_fence_get_rcu()
                 * provides a full memory barrier upon success (such as now).
                 * This is paired with the write barrier from assigning
                 * to the __rcu protected fence pointer so that if that
                 * pointer still matches the current fence, we know we
                 * have successfully acquired a reference to it. If it no
                 * longer matches, we are holding a reference to some other
                 * reallocated pointer. This is possible if the allocator
                 * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
                 * fence remains valid for the RCU grace period, but it
                 * may be reallocated. When using such allocators, we are
                 * responsible for ensuring the reference we get is to
                 * the right fence, as below.
                 */
                if (fence == rcu_access_pointer(*fencep))
                        return rcu_pointer_handoff(fence);

                dma_fence_put(fence);
        } while (1);
}
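
/*
 * Example (illustrative sketch, not part of this header): taking a stable
 * reference to an RCU-protected fence pointer. The __rcu-annotated field
 * "obj->fence" is hypothetical:
 *
 *        struct dma_fence *fence;
 *
 *        rcu_read_lock();
 *        fence = dma_fence_get_rcu_safe(&obj->fence);
 *        rcu_read_unlock();
 *
 *        if (fence) {
 *                // use the fence; we now hold a full reference
 *                dma_fence_put(fence);
 *        }
 */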

int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
signed long dma_fence_default_wait(struct dma_fence *fence,
                                   bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
                           struct dma_fence_cb *cb,
                           dma_fence_func_t func);
bool dma_fence_remove_callback(struct dma_fence *fence,
                               struct dma_fence_cb *cb);
void dma_fence_enable_sw_signaling(struct dma_fence *fence);

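/*
 * Example (illustrative sketch, not part of this header): registering a
 * callback. dma_fence_add_callback() returns -ENOENT if the fence was
 * already signaled, in which case the callback is never invoked, so that
 * case must be handled inline. Continuing the hypothetical my_waiter
 * sketch from above:
 *
 *        struct my_waiter w;
 *
 *        init_completion(&w.done);
 *        if (dma_fence_add_callback(fence, &w.cb, my_fence_cb))
 *                complete(&w.done);    // already signaled (or error)
 */
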
/**
 * dma_fence_is_signaled_locked - Return an indication if the fence
 *                                is signaled yet.
 * @fence:      [in]    the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback, dma_fence_wait or
 * dma_fence_enable_sw_signaling haven't been called before.
 *
 * This function requires fence->lock to be held.
 */
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return true;

        if (fence->ops->signaled && fence->ops->signaled(fence)) {
                dma_fence_signal_locked(fence);
                return true;
        }

        return false;
}

/**
 * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
 * @fence:      [in]    the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback, dma_fence_wait or
 * dma_fence_enable_sw_signaling haven't been called before.
 *
 * It's recommended for seqno fences to call dma_fence_signal when the
 * operation is complete; this makes it possible to prevent issues from
 * wraparound between time of issue and time of use by checking the return
 * value of this function before calling hardware-specific wait instructions.
 */
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return true;

        if (fence->ops->signaled && fence->ops->signaled(fence)) {
                dma_fence_signal(fence);
                return true;
        }

        return false;
}
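
/*
 * Example (illustrative sketch, not part of this header): skipping a
 * hardware-specific wait when the fence is already signaled. The
 * my_hw_wait_on_seqno() helper is hypothetical:
 *
 *        if (!dma_fence_is_signaled(fence))
 *                my_hw_wait_on_seqno(ring, fence->seqno);
 */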

/**
 * dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: [in]    the first fence from the same context
 * @f2: [in]    the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool dma_fence_is_later(struct dma_fence *f1,
                                      struct dma_fence *f2)
{
        if (WARN_ON(f1->context != f2->context))
                return false;

        return (int)(f1->seqno - f2->seqno) > 0;
}

 354
 355/**
 356 * dma_fence_later - return the chronologically later fence
 357 * @f1: [in]    the first fence from the same context
 358 * @f2: [in]    the second fence from the same context
 359 *
 360 * Returns NULL if both fences are signaled, otherwise the fence that would be
 361 * signaled last. Both fences must be from the same context, since a seqno is
 362 * not re-used across contexts.
 363 */
 364static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
 365                                                struct dma_fence *f2)
 366{
 367        if (WARN_ON(f1->context != f2->context))
 368                return NULL;
 369
 370        /*
 371         * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
 372         * have been set if enable_signaling wasn't called, and enabling that
 373         * here is overkill.
 374         */
 375        if (dma_fence_is_later(f1, f2))
 376                return dma_fence_is_signaled(f1) ? NULL : f1;
 377        else
 378                return dma_fence_is_signaled(f2) ? NULL : f2;
 379}

/**
 * dma_fence_get_status_locked - returns the status upon completion
 * @fence: [in] the dma_fence to query
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence (to indicate whether the fence was completed due to an error
 * rather than success). The value of the status condition is only valid
 * if the fence has been signaled, so dma_fence_get_status_locked() first
 * checks the signal state before reporting the error status.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed with an error.
 */
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
{
        if (dma_fence_is_signaled_locked(fence))
                return fence->error ?: 1;
        else
                return 0;
}

int dma_fence_get_status(struct dma_fence *fence);

/**
 * dma_fence_set_error - flag an error condition on the fence
 * @fence: [in] the dma_fence
 * @error: [in] the error to store
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence, to indicate that the fence was completed due to an error
 * rather than success. This must be set before signaling (so that the value
 * is visible before any waiters on the signal callback are woken). This
 * helper exists to help catch erroneous setting of #dma_fence.error.
 */
static inline void dma_fence_set_error(struct dma_fence *fence,
                                       int error)
{
        BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
        BUG_ON(error >= 0 || error < -MAX_ERRNO);

        fence->error = error;
}
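
/*
 * Example (illustrative sketch, not part of this header): reporting a
 * failed operation. The error must be stored before the fence is signaled
 * so that waiters observe it; dma_fence_get_status() will then return the
 * negative error instead of 1:
 *
 *        dma_fence_set_error(fence, -EIO);
 *        dma_fence_signal(fence);
 */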

signed long dma_fence_wait_timeout(struct dma_fence *,
                                   bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
                                       uint32_t count,
                                       bool intr, signed long timeout,
                                       uint32_t *idx);

/**
 * dma_fence_wait - sleep until the fence gets signaled
 * @fence:      [in]    the fence to wait on
 * @intr:       [in]    if true, do an interruptible wait
 *
 * This function will return -ERESTARTSYS if interrupted by a signal,
 * or 0 if the fence was signaled. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 */
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
        signed long ret;

        /* Since dma_fence_wait_timeout cannot timeout with
         * MAX_SCHEDULE_TIMEOUT, only valid return values are
         * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
         */
        ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

        return ret < 0 ? ret : 0;
}
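
/*
 * Example (illustrative sketch, not part of this header): an interruptible
 * wait, holding a reference across the sleep as the comment above requires:
 *
 *        long err;
 *
 *        dma_fence_get(fence);
 *        err = dma_fence_wait(fence, true);
 *        dma_fence_put(fence);
 *        if (err)    // -ERESTARTSYS if interrupted by a signal
 *                return err;
 */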

u64 dma_fence_context_alloc(unsigned num);

#define DMA_FENCE_TRACE(f, fmt, args...) \
        do {                                                            \
                struct dma_fence *__ff = (f);                           \
                if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))                 \
                        pr_info("f %llu#%u: " fmt,                      \
                                __ff->context, __ff->seqno, ##args);    \
        } while (0)

#define DMA_FENCE_WARN(f, fmt, args...) \
        do {                                                            \
                struct dma_fence *__ff = (f);                           \
                pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno,  \
                         ##args);                                       \
        } while (0)

#define DMA_FENCE_ERR(f, fmt, args...) \
        do {                                                            \
                struct dma_fence *__ff = (f);                           \
                pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno,   \
                        ##args);                                        \
        } while (0)

#endif /* __LINUX_DMA_FENCE_H */