linux/drivers/dma-buf/fence.c
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fence.h>

EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(fence_emit);
/*
 * fence context counter: each execution context should have its own
 * fence context; this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * which are used if some engine can run independently of another.
 */
static atomic64_t fence_context_counter = ATOMIC64_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    number of contexts to allocate
 *
 * This function will return the first index of the newly allocated range
 * of @num fence contexts. The fence context is used for setting
 * fence->context to a unique number.
 */
u64 fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic64_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
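
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * with several independent engines would typically allocate one context
 * per engine at probe time. foo_device, foo_device_init_fencing and
 * FOO_NUM_RINGS are hypothetical names; the #if 0 guard keeps the sketch
 * out of the build.
 */
#if 0
#define FOO_NUM_RINGS 2

struct foo_device {
        u64 fence_context;              /* first of FOO_NUM_RINGS contexts */
        unsigned ring_seqno[FOO_NUM_RINGS];
};

static void foo_device_init_fencing(struct foo_device *fdev)
{
        /* ring i will stamp its fences with context fence_context + i */
        fdev->fence_context = fence_context_alloc(FOO_NUM_RINGS);
}
#endif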

/**
 * fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). It can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state, it will only be
 * effective the first time.
 *
 * Unlike fence_signal(), this function must be called with fence->lock held.
 */
int fence_signal_locked(struct fence *fence)
{
        struct fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked fence_signal,
                 * still run through all callbacks
                 */
        } else
                trace_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(fence_signal_locked);

/**
 * fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). It can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state, it will only be
 * effective the first time.
 *
 * Unlike fence_signal_locked(), this function takes fence->lock itself, so
 * it must not be called with that lock already held.
 */
int fence_signal(struct fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

        trace_fence_signaled(fence);

        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(fence_signal);
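
/*
 * Example (illustrative sketch, not part of the original file): a fence is
 * typically signaled from completion-interrupt context once the hardware
 * reports that the dma finished. foo_irq_handler and
 * foo_pop_completed_fence are hypothetical names.
 */
#if 0
static struct fence *foo_pop_completed_fence(void *data); /* hypothetical */

static irqreturn_t foo_irq_handler(int irq, void *data)
{
        struct fence *fence = foo_pop_completed_fence(data);

        if (!fence)
                return IRQ_NONE;

        /* unblocks waiters and runs callbacks; safe from irq context */
        fence_signal(fence);
        fence_put(fence);

        return IRQ_HANDLED;
}
#endif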
 137
 138/**
 139 * fence_wait_timeout - sleep until the fence gets signaled
 140 * or until timeout elapses
 141 * @fence:      [in]    the fence to wait on
 142 * @intr:       [in]    if true, do an interruptible wait
 143 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 144 *
 145 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 146 * remaining timeout in jiffies on success. Other error values may be
 147 * returned on custom implementations.
 148 *
 149 * Performs a synchronous wait on this fence. It is assumed the caller
 150 * directly or indirectly (buf-mgr between reservation and committing)
 151 * holds a reference to the fence, otherwise the fence might be
 152 * freed before return, resulting in undefined behavior.
 153 */
 154signed long
 155fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
 156{
 157        signed long ret;
 158
 159        if (WARN_ON(timeout < 0))
 160                return -EINVAL;
 161
 162        if (timeout == 0)
 163                return fence_is_signaled(fence);
 164
 165        trace_fence_wait_start(fence);
 166        ret = fence->ops->wait(fence, intr, timeout);
 167        trace_fence_wait_end(fence);
 168        return ret;
 169}
 170EXPORT_SYMBOL(fence_wait_timeout);
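
/*
 * Example (illustrative sketch, not part of the original file): a bounded
 * synchronous wait before CPU access to a buffer. foo_cpu_prep is a
 * hypothetical helper; note the reference held across the wait.
 */
#if 0
static int foo_cpu_prep(struct fence *fence)
{
        signed long ret;

        fence_get(fence);       /* keep the fence alive while sleeping */
        ret = fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
        fence_put(fence);

        if (ret == 0)
                return -ETIMEDOUT;

        return ret < 0 ? ret : 0;
}
#endif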

/**
 * fence_release - default release function for fences
 * @kref: fence->refcount
 *
 * Called when the last reference to the fence is dropped. Invokes the
 * driver's release callback if one is provided, otherwise frees the
 * fence with fence_free().
 */
void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

        trace_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

/**
 * fence_free - free a fence after an RCU grace period
 * @fence: the fence to free
 */
void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
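
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * embedding struct fence in a larger object supplies a release hook that
 * tears down its own state and then ends with fence_free(). struct
 * foo_fence and foo_fence_cleanup are hypothetical.
 */
#if 0
struct foo_fence {
        struct fence base;      /* must stay first so fence_free() frees us */
        void *driver_private;
};

static void foo_fence_cleanup(void *priv); /* hypothetical */

static void foo_fence_release(struct fence *fence)
{
        struct foo_fence *f = container_of(fence, struct foo_fence, base);

        foo_fence_cleanup(f->driver_private);
        fence_free(fence);      /* frees through kfree_rcu */
}
#endif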

/**
 * fence_enable_sw_signaling - enable signaling on fence
 * @fence:      [in]    the fence to enable
 *
 * This will request that software signaling be enabled, to make the fence
 * complete as soon as possible.
 */
void fence_enable_sw_signaling(struct fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                trace_fence_enable_signal(fence);

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(fence_enable_sw_signaling);
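
/*
 * Example (illustrative sketch, not part of the original file): code that
 * later polls fence status, rather than sleeping in fence_wait(), can ask
 * for interrupt-driven completion up front so the fences signal on their
 * own. foo_prepare_poll is a hypothetical helper.
 */
#if 0
static void foo_prepare_poll(struct fence **fences, unsigned count)
{
        unsigned i;

        for (i = 0; i < count; i++)
                fence_enable_sw_signaling(fences[i]);
}
#endif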

/**
 * fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to register
 * @func:       [in]    the function to call
 *
 * cb will be initialized by fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If the
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. The same restrictions on the
 * refcount apply as to fence_wait, however the caller doesn't need to
 * keep a refcount to the fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with fence_signal. The callback itself can be called
 * from irq context.
 */
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
                       fence_func_t func)
{
        unsigned long flags;
        int ret = 0;
        bool was_set;

        if (WARN_ON(!fence || !func))
                return -EINVAL;

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                INIT_LIST_HEAD(&cb->node);
                return -ENOENT;
        }

        spin_lock_irqsave(fence->lock, flags);

        was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                ret = -ENOENT;
        else if (!was_set) {
                trace_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        fence_signal_locked(fence);
                        ret = -ENOENT;
                }
        }

        if (!ret) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
        } else
                INIT_LIST_HEAD(&cb->node);
        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(fence_add_callback);
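
/*
 * Example (illustrative sketch, not part of the original file): bridging a
 * fence to a completion. The callback may run from irq context, so it only
 * does atomic-safe work. foo_waiter, foo_fence_cb and foo_wait_via_callback
 * are hypothetical names.
 */
#if 0
struct foo_waiter {
        struct fence_cb cb;
        struct completion done;
};

static void foo_fence_cb(struct fence *fence, struct fence_cb *cb)
{
        struct foo_waiter *w = container_of(cb, struct foo_waiter, cb);

        complete(&w->done);     /* atomic-safe: may run in irq context */
}

static int foo_wait_via_callback(struct fence *fence)
{
        struct foo_waiter w;

        init_completion(&w.done);

        /* -ENOENT means the fence already signaled; nothing was queued */
        if (fence_add_callback(fence, &w.cb, foo_fence_cb))
                return 0;

        wait_for_completion(&w.done);
        return 0;
}
#endif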

/**
 * fence_remove_callback - remove a callback from the signaling list
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 */
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(fence->lock, flags);

        ret = !list_empty(&cb->node);
        if (ret)
                list_del_init(&cb->node);

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(fence_remove_callback);
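
/*
 * Example (illustrative sketch, not part of the original file): a bounded
 * wait built on the hypothetical foo_waiter from the sketch after
 * fence_add_callback() above. If the wait gives up first, the callback must
 * be cancelled; a false return from fence_remove_callback() means we raced
 * and the callback already ran.
 */
#if 0
static int foo_wait_cb_timeout(struct fence *fence, struct foo_waiter *w)
{
        init_completion(&w->done);

        if (fence_add_callback(fence, &w->cb, foo_fence_cb))
                return 0;       /* already signaled */

        if (!wait_for_completion_timeout(&w->done, HZ)) {
                /* timed out: cancel unless the fence signaled after all */
                if (fence_remove_callback(fence, &w->cb))
                        return -ETIMEDOUT;
        }

        return 0;
}
#endif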

struct default_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
};

static void
fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
{
        struct default_wait_cb *wait =
                container_of(cb, struct default_wait_cb, base);

        wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:      [in]    the fence to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success.
 */
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
{
        struct default_wait_cb cb;
        unsigned long flags;
        signed long ret = timeout;
        bool was_set;

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return timeout;

        spin_lock_irqsave(fence->lock, flags);

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                goto out;

        if (!was_set) {
                trace_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        fence_signal_locked(fence);
                        goto out;
                }
        }

        cb.base.func = fence_default_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &fence->cb_list);

        while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(fence->lock, flags);

                ret = schedule_timeout(ret);

                spin_lock_irqsave(fence->lock, flags);
                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);
        __set_current_state(TASK_RUNNING);

out:
        spin_unlock_irqrestore(fence->lock, flags);
        return ret;
}
EXPORT_SYMBOL(fence_default_wait);

static bool
fence_test_signaled_any(struct fence **fences, uint32_t count)
{
        int i;

        for (i = 0; i < count; ++i) {
                struct fence *fence = fences[i];

                if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        return true;
        }
        return false;
}

/**
 * fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences:     [in]    array of fences to wait on
 * @count:      [in]    number of fences to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_any_timeout(struct fence **fences, uint32_t count,
                       bool intr, signed long timeout)
{
        struct default_wait_cb *cb;
        signed long ret = timeout;
        unsigned i;

        if (WARN_ON(!fences || !count || timeout < 0))
                return -EINVAL;

        if (timeout == 0) {
                for (i = 0; i < count; ++i)
                        if (fence_is_signaled(fences[i]))
                                return 1;

                return 0;
        }

        cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
        if (cb == NULL) {
                ret = -ENOMEM;
                goto err_free_cb;
        }

        for (i = 0; i < count; ++i) {
                struct fence *fence = fences[i];

                if (fence->ops->wait != fence_default_wait) {
                        ret = -EINVAL;
                        goto fence_rm_cb;
                }

                cb[i].task = current;
                if (fence_add_callback(fence, &cb[i].base,
                                       fence_default_wait_cb)) {
                        /* This fence is already signaled */
                        goto fence_rm_cb;
                }
        }

        while (ret > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                if (fence_test_signaled_any(fences, count))
                        break;

                ret = schedule_timeout(ret);

                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

fence_rm_cb:
        while (i-- > 0)
                fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
        kfree(cb);

        return ret;
}
EXPORT_SYMBOL(fence_wait_any_timeout);
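
/*
 * Example (illustrative sketch, not part of the original file): racing two
 * engines and acting on whichever finishes first. All fences must use
 * fence_default_wait as their wait op, or this returns -EINVAL.
 * foo_wait_first is a hypothetical helper.
 */
#if 0
static int foo_wait_first(struct fence *a, struct fence *b)
{
        struct fence *fences[2] = { a, b };
        signed long ret;

        ret = fence_wait_any_timeout(fences, 2, true,
                                     msecs_to_jiffies(500));
        if (ret == 0)
                return -ETIMEDOUT;      /* neither fence signaled in time */

        return ret < 0 ? ret : 0;
}
#endif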

/**
 * fence_init - Initialize a custom fence.
 * @fence:      [in]    the fence to initialize
 * @ops:        [in]    the fence_ops for operations on this fence
 * @lock:       [in]    the irqsafe spinlock to use for locking this fence
 * @context:    [in]    the execution context this fence is run on
 * @seqno:      [in]    a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fences.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * callers to check which fence is later by simply using fence_later().
 */
void
fence_init(struct fence *fence, const struct fence_ops *ops,
           spinlock_t *lock, u64 context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;

        trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
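
/*
 * Example (illustrative sketch, not part of the original file): the minimal
 * fence_ops a driver must provide before fence_init() will accept them,
 * reusing fence_default_wait for the wait op and the foo_device sketch from
 * fence_context_alloc() above. All foo_* names are hypothetical; a single
 * shared irqsafe spinlock is a simplification.
 */
#if 0
static DEFINE_SPINLOCK(foo_fence_lock);

static const char *foo_get_driver_name(struct fence *fence)
{
        return "foo";
}

static const char *foo_get_timeline_name(struct fence *fence)
{
        return "foo-ring0";
}

static bool foo_enable_signaling(struct fence *fence)
{
        /*
         * Arm the completion interrupt here; returning false tells the
         * core the fence already completed and it will signal it for us.
         */
        return true;
}

static const struct fence_ops foo_fence_ops = {
        .get_driver_name = foo_get_driver_name,
        .get_timeline_name = foo_get_timeline_name,
        .enable_signaling = foo_enable_signaling,
        .wait = fence_default_wait,
};

static struct fence *foo_fence_create(struct foo_device *fdev, unsigned ring)
{
        struct fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

        if (!fence)
                return NULL;

        fence_init(fence, &foo_fence_ops, &foo_fence_lock,
                   fdev->fence_context + ring, ++fdev->ring_seqno[ring]);
        return fence;
}
#endif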