linux/drivers/dma-buf/dma-fence.c
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);

/*
 * fence context counter: each execution context should have its own
 * fence context; this allows checking whether two fences belong to the
 * same context. One device can have multiple separate contexts,
 * which are used when engines can run independently of one another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    number of contexts to allocate
 *
 * This function returns the first index of the newly allocated range of
 * contexts. The fence context is used for setting fence->context to a
 * unique number.
 */
u64 dma_fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);

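/*
 * Example (illustrative sketch, not part of the original file): a driver
 * whose device has several independently running engines would typically
 * reserve one context per engine at init time and derive per-engine fence
 * contexts from the returned base. The names my_dev and MY_NUM_RINGS
 * below are hypothetical.
 *
 *      my_dev->fence_context = dma_fence_context_alloc(MY_NUM_RINGS);
 *
 * A fence submitted on ring i is then initialized with context
 * my_dev->fence_context + i, so fences from different rings never compare
 * as belonging to the same timeline.
 */
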
/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with fence->lock
 * held.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
        struct dma_fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

        lockdep_assert_held(fence->lock);

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked dma_fence_signal,
                 * still run through all callbacks
                 */
        } else {
                trace_dma_fence_signaled(fence);
        }

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(dma_fence_signal_locked);

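/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that walks several pending fences under one shared spinlock can use the
 * _locked variant to avoid retaking the lock per fence. my_ring, its
 * members, and my_seqno_passed() are hypothetical; every fence on the
 * pending list is assumed to use ring->lock as its fence->lock.
 *
 *      spin_lock_irqsave(&ring->lock, flags);
 *      list_for_each_entry_safe(f, tmp, &ring->pending, link)
 *              if (my_seqno_passed(ring, f->seqno))
 *                      dma_fence_signal_locked(&f->base);
 *      spin_unlock_irqrestore(&ring->lock, flags);
 */
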
/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
int dma_fence_signal(struct dma_fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

        trace_dma_fence_signaled(fence);

        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct dma_fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(dma_fence_signal);

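/*
 * Example (illustrative sketch, not part of the original file): completion
 * is typically reported from the driver's interrupt handler once the
 * hardware has passed the fence's sequence number. my_irq_handler, my_ring
 * and my_fence_completed() are hypothetical.
 *
 *      static irqreturn_t my_irq_handler(int irq, void *data)
 *      {
 *              struct my_ring *ring = data;
 *              struct my_fence *f = my_fence_completed(ring);
 *
 *              if (f)
 *                      dma_fence_signal(&f->base);
 *              return IRQ_HANDLED;
 *      }
 */
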
/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:      [in]    the fence to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned by custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (e.g. through a buffer manager between reservation
 * and commit) holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

        trace_dma_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
        trace_dma_fence_wait_end(fence);
        return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

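/*
 * Example (illustrative sketch, not part of the original file): waiting
 * for up to one second and distinguishing the three classes of return
 * value.
 *
 *      signed long ret;
 *
 *      ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
 *      if (ret == 0)
 *              return -ETIMEDOUT;
 *      if (ret < 0)
 *              return ret;
 *
 * A positive return value is the remaining timeout in jiffies and means
 * the fence signaled in time; 0 means the wait timed out; a negative value
 * is an error such as -ERESTARTSYS.
 */
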
/**
 * dma_fence_release - default release function for fences
 * @kref: fence refcount
 *
 * Called from dma_fence_put() when the last reference is dropped. Invokes
 * &dma_fence_ops.release if the ops provide one, otherwise frees the fence
 * with dma_fence_free(). Drivers shouldn't call this directly.
 */
void dma_fence_release(struct kref *kref)
{
        struct dma_fence *fence =
                container_of(kref, struct dma_fence, refcount);

        trace_dma_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - free a fence after an RCU grace period
 * @fence: the fence to free
 *
 * Frees the fence with kfree_rcu(), i.e. after an RCU grace period has
 * elapsed. Suitable for fences allocated with kmalloc()/kzalloc().
 */
void dma_fence_free(struct dma_fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence:      [in]    the fence to enable
 *
 * This will request that software signaling be enabled, to make the fence
 * complete as soon as possible.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                              &fence->flags) &&
            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                trace_dma_fence_enable_signal(fence);

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        dma_fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to register
 * @func:       [in]    the function to call
 *
 * cb will be initialized by dma_fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context or irq
 * context. If the fence is already signaled, this function will return
 * -ENOENT (and *not* call the callback).
 *
 * Add a software callback to the fence. The same restrictions on the
 * refcount apply as for dma_fence_wait(), however the caller doesn't need
 * to keep a refcount to the fence afterwards: when software access is
 * enabled, the creator of the fence is required to keep the fence alive
 * until after it signals with dma_fence_signal().
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
                           dma_fence_func_t func)
{
        unsigned long flags;
        int ret = 0;
        bool was_set;

        if (WARN_ON(!fence || !func))
                return -EINVAL;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                INIT_LIST_HEAD(&cb->node);
                return -ENOENT;
        }

        spin_lock_irqsave(fence->lock, flags);

        was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                                   &fence->flags);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -ENOENT;
        } else if (!was_set) {
                trace_dma_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        dma_fence_signal_locked(fence);
                        ret = -ENOENT;
                }
        }

        if (!ret) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
        } else {
                INIT_LIST_HEAD(&cb->node);
        }
        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

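/*
 * Example (illustrative sketch, not part of the original file): embedding
 * the dma_fence_cb in a driver-private structure and recovering it with
 * container_of() in the callback. struct my_waiter, my_job and
 * my_job_run() are hypothetical.
 *
 *      struct my_waiter {
 *              struct dma_fence_cb cb;
 *              struct my_job *job;
 *      };
 *
 *      static void my_fence_cb(struct dma_fence *fence,
 *                              struct dma_fence_cb *cb)
 *      {
 *              struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *              queue_work(system_wq, &w->job->work);
 *      }
 *
 *      ret = dma_fence_add_callback(fence, &w->cb, my_fence_cb);
 *      if (ret == -ENOENT)
 *              my_job_run(w->job);
 *
 * Since -ENOENT means the fence already signaled and the callback will not
 * run, the caller must handle that case inline, as sketched above.
 */
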
/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback was successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done during hardware lockup recovery,
 * with a reference held to the fence.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(fence->lock, flags);

        ret = !list_empty(&cb->node);
        if (ret)
                list_del_init(&cb->node);

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

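/*
 * Example (illustrative sketch, not part of the original file): a lockup
 * recovery path tearing down a pending waiter (w as in the earlier
 * hypothetical my_waiter sketch). Only when removal succeeds does the
 * caller own the callback again; on false the fence signaled first and
 * the callback has already run, so its resources must be released from
 * the callback side instead.
 *
 *      if (dma_fence_remove_callback(fence, &w->cb))
 *              kfree(w);
 */
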
struct default_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct default_wait_cb *wait =
                container_of(cb, struct default_wait_cb, base);

        wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:      [in]    the fence to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If the timeout is zero, one is
 * returned when the fence is already signaled, for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
        struct default_wait_cb cb;
        unsigned long flags;
        signed long ret = timeout ? timeout : 1;
        bool was_set;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return ret;

        spin_lock_irqsave(fence->lock, flags);

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                                   &fence->flags);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                goto out;

        if (!was_set) {
                trace_dma_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        dma_fence_signal_locked(fence);
                        goto out;
                }
        }

        cb.base.func = dma_fence_default_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &fence->cb_list);

        while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
               ret > 0) {
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(fence->lock, flags);

                ret = schedule_timeout(ret);

                spin_lock_irqsave(fence->lock, flags);
                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);
        __set_current_state(TASK_RUNNING);

out:
        spin_unlock_irqrestore(fence->lock, flags);
        return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
                            uint32_t *idx)
{
        int i;

        for (i = 0; i < count; ++i) {
                struct dma_fence *fence = fences[i];

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                        if (idx)
                                *idx = i;
                        return true;
                }
        }
        return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences:     [in]    array of fences to wait on
 * @count:      [in]    number of fences to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx:       [out]    the first signaled fence index, meaningful only on
 *                      positive return
 *
 * Returns -EINVAL if any fence uses a custom wait implementation,
 * -ERESTARTSYS if interrupted, 0 if the wait timed out, or the remaining
 * timeout in jiffies on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
                           bool intr, signed long timeout, uint32_t *idx)
{
        struct default_wait_cb *cb;
        signed long ret = timeout;
        unsigned i;

        if (WARN_ON(!fences || !count || timeout < 0))
                return -EINVAL;

        if (timeout == 0) {
                for (i = 0; i < count; ++i)
                        if (dma_fence_is_signaled(fences[i])) {
                                if (idx)
                                        *idx = i;
                                return 1;
                        }

                return 0;
        }

        cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
        if (cb == NULL) {
                ret = -ENOMEM;
                goto err_free_cb;
        }

        for (i = 0; i < count; ++i) {
                struct dma_fence *fence = fences[i];

                if (fence->ops->wait != dma_fence_default_wait) {
                        ret = -EINVAL;
                        goto fence_rm_cb;
                }

                cb[i].task = current;
                if (dma_fence_add_callback(fence, &cb[i].base,
                                           dma_fence_default_wait_cb)) {
                        /* This fence is already signaled */
                        if (idx)
                                *idx = i;
                        goto fence_rm_cb;
                }
        }

        while (ret > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                if (dma_fence_test_signaled_any(fences, count, idx))
                        break;

                ret = schedule_timeout(ret);

                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

fence_rm_cb:
        while (i-- > 0)
                dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
        kfree(cb);

        return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

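/*
 * Example (illustrative sketch, not part of the original file): waiting up
 * to 100 ms for whichever of two fences signals first. render_done and
 * blit_done are hypothetical fence pointers, both assumed to use
 * dma_fence_default_wait as their wait callback.
 *
 *      struct dma_fence *fences[2] = { render_done, blit_done };
 *      uint32_t first;
 *      signed long ret;
 *
 *      ret = dma_fence_wait_any_timeout(fences, 2, true,
 *                                       msecs_to_jiffies(100), &first);
 *      if (ret > 0)
 *              pr_debug("fence %u signaled first\n", first);
 *
 * On a positive return, first holds the index of a signaled fence and ret
 * the remaining timeout in jiffies.
 */
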
/**
 * dma_fence_init - Initialize a custom fence.
 * @fence:      [in]    the fence to initialize
 * @ops:        [in]    the dma_fence_ops for operations on this fence
 * @lock:       [in]    the irqsafe spinlock to use for locking this fence
 * @context:    [in]    the execution context this fence is run on
 * @seqno:      [in]    a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if dma_fence_ops.enable_signaling gets called. This can
 * be used as a base for implementing other types of fences.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * you to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
               spinlock_t *lock, u64 context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;

        trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);

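/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * driver fence built on this API, using dma_fence_default_wait for the
 * wait callback. struct my_fence, my_get_driver_name, my_get_timeline_name
 * and my_enable_signaling are hypothetical.
 *
 *      static const struct dma_fence_ops my_fence_ops = {
 *              .get_driver_name = my_get_driver_name,
 *              .get_timeline_name = my_get_timeline_name,
 *              .enable_signaling = my_enable_signaling,
 *              .wait = dma_fence_default_wait,
 *      };
 *
 *      struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *      if (f)
 *              dma_fence_init(&f->base, &my_fence_ops, &ring->lock,
 *                             ring->fence_context, ++ring->seqno);
 *
 * Dropping the last reference with dma_fence_put() ends up in
 * dma_fence_release(), which frees the fence via dma_fence_free() unless
 * the ops provide their own release callback.
 */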