/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#ifndef __LINUX_DMA_FENCE_H
#define __LINUX_DMA_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;

/**
 * struct dma_fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: dma_fence_ops associated with this fence
 * @rcu: used for releasing fence with kfree_rcu
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 *           dma_fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context,
 *         can be compared to decide which fence would be signaled later.
 * @flags: A mask of DMA_FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @error: Optional, only valid if < 0, must be set before calling
 *         dma_fence_signal, indicates that the fence has completed with an error.
 *
 * The flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * Since atomic bitops are used, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT only
 * indicates that enable_signaling *might* have been called: if
 * dma_fence_signal was called right before this bit was set, it could have
 * set DMA_FENCE_FLAG_SIGNALED_BIT before enable_signaling ran. Adding a
 * check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
 * after dma_fence_signal was called, any enable_signaling call will either
 * have completed, or never have been called at all (see the sketch
 * following the struct).
 */
struct dma_fence {
	spinlock_t *lock;
	const struct dma_fence_ops *ops;
	/*
	 * We clear the callback list on kref_put so that by the time we
	 * release the fence it is unused. No one should be adding to the
	 * cb_list that they don't themselves hold a reference for.
	 *
	 * The lifetime of the timestamp is similarly tied to both the
	 * rcu freelist and the cb_list. The timestamp is only set upon
	 * signaling while simultaneously notifying the cb_list. Ergo, we
	 * only use either the cb_list or the timestamp. Upon destruction,
	 * neither is accessible, and so we can use the rcu. This means
	 * that the cb_list is *only* valid until the signal bit is set,
	 * and to read either you *must* hold a reference to the fence,
	 * and not just the rcu_read_lock.
	 *
	 * Listed in chronological order.
	 */
	union {
		struct list_head cb_list;
		/* @cb_list replaced by @timestamp on dma_fence_signal() */
		ktime_t timestamp;
		/* @timestamp replaced by @rcu on dma_fence_release() */
		struct rcu_head rcu;
	};
	u64 context;
	u64 seqno;
	unsigned long flags;
	struct kref refcount;
	int error;
};
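/*
 * Illustrative sketch (not part of the API) of the check order described
 * above; this is roughly the pattern the dma_fence core uses when signaling
 * is first enabled, not a verbatim copy of it. Re-testing
 * DMA_FENCE_FLAG_SIGNALED_BIT after setting
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT is what closes the race against a
 * concurrent dma_fence_signal():
 *
 *	bool was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 *					&fence->flags);
 *
 *	// signaled in the meantime? Then enable_signaling must not run
 *	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 *		return false;
 *
 *	if (!was_set && fence->ops->enable_signaling &&
 *	    !fence->ops->enable_signaling(fence)) {
 *		dma_fence_signal_locked(fence);
 *		return false;
 *	}
 *
 *	return true;
 */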
enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,
	DMA_FENCE_FLAG_TIMESTAMP_BIT,
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};

typedef void (*dma_fence_func_t)(struct dma_fence *fence,
				 struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback()
 * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback(); additional
 * data can be passed along by embedding dma_fence_cb in another struct,
 * as sketched below.
 */
struct dma_fence_cb {
	struct list_head node;
	dma_fence_func_t func;
};
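/*
 * Illustrative sketch (hypothetical my_wait/my_fence_cb names) of passing
 * extra data to a fence callback by embedding struct dma_fence_cb in a
 * driver-private struct and recovering it with container_of() when the
 * callback runs. Note that dma_fence_add_callback() returns -ENOENT when
 * the fence is already signaled, in which case the callback is never
 * installed and the caller must handle completion itself:
 *
 *	struct my_wait {
 *		struct dma_fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void my_fence_cb(struct dma_fence *fence,
 *				struct dma_fence_cb *cb)
 *	{
 *		struct my_wait *wait = container_of(cb, struct my_wait, cb);
 *
 *		complete(&wait->done);
 *	}
 *
 *	// registration:
 *	init_completion(&wait->done);
 *	if (dma_fence_add_callback(fence, &wait->cb, my_fence_cb) == -ENOENT)
 *		complete(&wait->done);	// fence already signaled
 */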
/**
 * struct dma_fence_ops - operations implemented for fence
 */
struct dma_fence_ops {
	/**
	 * @use_64bit_seqno:
	 *
	 * True if this dma_fence implementation uses 64bit seqno, false
	 * otherwise.
	 */
	bool use_64bit_seqno;

	/**
	 * @get_driver_name:
	 *
	 * Returns the driver name. This is a callback to allow drivers to
	 * compute the name at runtime, without having to store it
	 * permanently for each fence, or build a cache of some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_driver_name)(struct dma_fence *fence);

	/**
	 * @get_timeline_name:
	 *
	 * Return the name of the context this fence belongs to. This is a
	 * callback to allow drivers to compute the name at runtime, without
	 * having to store it permanently for each fence, or build a cache
	 * of some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_timeline_name)(struct dma_fence *fence);

	/**
	 * @enable_signaling:
	 *
	 * Enable software signaling of fence.
	 *
	 * For fence implementations that have the capability for hw->hw
	 * signaling, they can implement this op to enable the necessary
	 * interrupts, or insert commands into cmdstream, etc., to avoid these
	 * costly operations for the common case where only hw->hw
	 * synchronization is required. This is called in the first
	 * dma_fence_wait() or dma_fence_add_callback() path to let the fence
	 * implementation know that there is another driver waiting on the
	 * signal (i.e. the hw->sw case).
	 *
	 * This function can be called from atomic context, but not
	 * from irq context, so normal spinlocks can be used.
	 *
	 * A return value of false indicates the fence already passed,
	 * or some failure occurred that made it impossible to enable
	 * signaling. True indicates successful enabling.
	 *
	 * &dma_fence.error may be set in enable_signaling, but only when false
	 * is returned.
	 *
	 * Since many implementations can call dma_fence_signal() even before
	 * @enable_signaling has been called, there's a race window where the
	 * dma_fence_signal() might result in the final fence reference being
	 * released and its memory freed. To avoid this, implementations of this
	 * callback should grab their own reference using dma_fence_get(), to be
	 * released when the fence is signaled (through e.g. the interrupt
	 * handler).
	 *
	 * This callback is optional. If this callback is not present, then the
	 * driver must always have signaling enabled.
	 */
	bool (*enable_signaling)(struct dma_fence *fence);

	/**
	 * @signaled:
	 *
	 * Peek whether the fence is signaled, as a fastpath optimization for
	 * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
	 * callback does not need to make any guarantees beyond that a fence
	 * which has once been reported as signaled must always return true
	 * from then on. This callback may return false even if the fence has
	 * completed already; in this case the information simply hasn't
	 * propagated through the system yet. See also dma_fence_is_signaled().
	 *
	 * May set &dma_fence.error if returning true.
	 *
	 * This callback is optional.
	 */
	bool (*signaled)(struct dma_fence *fence);

	/**
	 * @wait:
	 *
	 * Custom wait implementation, defaults to dma_fence_default_wait() if
	 * not set.
	 *
	 * The dma_fence_default_wait() implementation should work for any
	 * fence, as long as @enable_signaling works correctly. This hook
	 * allows drivers to have an optimized version for the case where a
	 * process context is already available, e.g. if @enable_signaling for
	 * the general case needs to set up a worker thread.
	 *
	 * Must return -ERESTARTSYS if the wait is intr = true and was
	 * interrupted, the remaining jiffies if the fence signaled, or 0 if
	 * the wait timed out. Can also return other error values on custom
	 * implementations, which should be treated as if the fence is
	 * signaled. For example a hardware lockup could be reported like that.
	 *
	 * This callback is optional.
	 */
	signed long (*wait)(struct dma_fence *fence,
			    bool intr, signed long timeout);

	/**
	 * @release:
	 *
	 * Called on destruction of fence to release additional resources.
	 * Can be called from irq context. This callback is optional. If it is
	 * NULL, then dma_fence_free() is instead called as the default
	 * implementation.
	 */
	void (*release)(struct dma_fence *fence);

	/**
	 * @fence_value_str:
	 *
	 * Callback to fill in free-form debug info specific to this fence, like
	 * the sequence number.
	 *
	 * This callback is optional.
	 */
	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);

	/**
	 * @timeline_value_str:
	 *
	 * Fills in the current value of the timeline as a string, like the
	 * sequence number. Note that the specific fence passed to this function
	 * should not matter, drivers should only use it to look up the
	 * corresponding timeline structures.
	 *
	 * This callback is optional.
	 */
	void (*timeline_value_str)(struct dma_fence *fence,
				   char *str, int size);
};

void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		    spinlock_t *lock, u64 context, u64 seqno);

void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);

/**
 * dma_fence_put - decreases refcount of the fence
 * @fence: fence to reduce refcount of
 */
static inline void dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}

/**
 * dma_fence_get - increases refcount of the fence
 * @fence: fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}
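/*
 * Illustrative sketch (hypothetical my_driver_* names) of a driver fence:
 * embed struct dma_fence, provide the two mandatory callbacks, and
 * initialize with dma_fence_init(). The context would typically come from
 * dma_fence_context_alloc() and the seqno from a per-context counter.
 * Without an @enable_signaling implementation, the driver must always have
 * signaling enabled, i.e. it must unconditionally call dma_fence_signal()
 * on completion:
 *
 *	struct my_driver_fence {
 *		struct dma_fence base;
 *		spinlock_t lock;
 *	};
 *
 *	static const char *my_driver_get_driver_name(struct dma_fence *f)
 *	{
 *		return "my_driver";
 *	}
 *
 *	static const char *my_driver_get_timeline_name(struct dma_fence *f)
 *	{
 *		return "my_timeline";
 *	}
 *
 *	static const struct dma_fence_ops my_driver_fence_ops = {
 *		.get_driver_name = my_driver_get_driver_name,
 *		.get_timeline_name = my_driver_get_timeline_name,
 *	};
 *
 *	// at fence creation time:
 *	spin_lock_init(&f->lock);
 *	dma_fence_init(&f->base, &my_driver_fence_ops, &f->lock,
 *		       context, seqno);
 */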
/**
 * dma_fence_get_rcu - get a fence from a dma_resv_list with
 * rcu read lock
 * @fence: fence to increase refcount of
 *
 * Returns the fence, or NULL if no refcount could be obtained.
 */
static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
{
	if (kref_get_unless_zero(&fence->refcount))
		return fence;
	else
		return NULL;
}

/**
 * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
 * @fencep: pointer to fence to increase refcount of
 *
 * Returns the fence, or NULL if no refcount could be obtained.
 * This function handles acquiring a reference to a fence that may be
 * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
 * so long as the caller is using RCU on the pointer to the fence.
 *
 * An alternative mechanism is to employ a seqlock to protect a bunch of
 * fences, such as used by struct dma_resv. When using a seqlock,
 * the seqlock must be taken before and checked after a reference to the
 * fence is acquired.
 *
 * The caller is required to hold the RCU read lock; see the usage sketch
 * further below.
 */
static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
	do {
		struct dma_fence *fence;

		fence = rcu_dereference(*fencep);
		if (!fence)
			return NULL;

		if (!dma_fence_get_rcu(fence))
			continue;

		/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
		 * provides a full memory barrier upon success (such as now).
		 * This is paired with the write barrier from assigning
		 * to the __rcu protected fence pointer so that if that
		 * pointer still matches the current fence, we know we
		 * have successfully acquired a reference to it. If it no
		 * longer matches, we are holding a reference to some other
		 * reallocated pointer. This is possible if the allocator
		 * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
		 * fence remains valid for the RCU grace period, but it
		 * may be reallocated. When using such allocators, we are
		 * responsible for ensuring the reference we get is to
		 * the right fence, as below.
		 */
		if (fence == rcu_access_pointer(*fencep))
			return rcu_pointer_handoff(fence);

		dma_fence_put(fence);
	} while (1);
}

int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
signed long dma_fence_default_wait(struct dma_fence *fence,
				   bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
			   struct dma_fence_cb *cb,
			   dma_fence_func_t func);
bool dma_fence_remove_callback(struct dma_fence *fence,
			       struct dma_fence_cb *cb);
void dma_fence_enable_sw_signaling(struct dma_fence *fence);

/**
 * dma_fence_is_signaled_locked - Return an indication if the fence
 * is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * This function requires &dma_fence.lock to be held.
 *
 * See also dma_fence_is_signaled().
 */
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal_locked(fence);
		return true;
	}

	return false;
}
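/*
 * Illustrative usage sketch for dma_fence_get_rcu_safe() (hypothetical
 * obj->fence field): look up an RCU-protected fence pointer, acquire a
 * reference that is guaranteed to be for the fence currently stored in
 * that pointer, then drop the RCU read lock before doing anything that
 * might sleep:
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&obj->fence);
 *	rcu_read_unlock();
 *
 *	if (fence) {
 *		// safe outside the RCU section, we hold a full reference
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */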
/**
 * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * It's recommended for seqno fences to call dma_fence_signal when the
 * operation is complete; it makes it possible to prevent issues from
 * wraparound between time of issue and time of use by checking the return
 * value of this function before calling hardware-specific wait instructions.
 *
 * See also dma_fence_is_signaled_locked().
 */
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}

/**
 * __dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence's seqno
 * @f2: the second fence's seqno from the same context
 * @ops: dma_fence_ops associated with the seqno
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool __dma_fence_is_later(u64 f1, u64 f2,
					const struct dma_fence_ops *ops)
{
	/* This is for backward compatibility with drivers which can only handle
	 * 32bit sequence numbers. Use a 64bit compare when the driver says to
	 * do so.
	 */
	if (ops->use_64bit_seqno)
		return f1 > f2;

	/* The unsigned subtraction followed by a signed compare handles
	 * wraparound: e.g. for f1 = 1 and f2 = 0xffffffff,
	 * (int)(1 - 0xffffffff) == 2 > 0, so f1 is correctly seen as later.
	 */
	return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}

/**
 * dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool dma_fence_is_later(struct dma_fence *f1,
				      struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return false;

	return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}

/**
 * dma_fence_later - return the chronologically later fence
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns NULL if both fences are signaled, otherwise the fence that would be
 * signaled last. Both fences must be from the same context, since a seqno is
 * not re-used across contexts.
 */
static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
						struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return NULL;

	/*
	 * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
	 * have been set if enable_signaling wasn't called, and enabling that
	 * here is overkill.
	 */
	if (dma_fence_is_later(f1, f2))
		return dma_fence_is_signaled(f1) ? NULL : f1;
	else
		return dma_fence_is_signaled(f2) ? NULL : f2;
}
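/*
 * Illustrative sketch (hypothetical timeline->last_fence field) of using
 * dma_fence_is_later() to retain only the most recent fence of a context;
 * seqnos within one context are totally ordered, so the older fence can be
 * dropped:
 *
 *	if (!timeline->last_fence ||
 *	    dma_fence_is_later(new_fence, timeline->last_fence)) {
 *		dma_fence_put(timeline->last_fence);	// NULL-safe
 *		timeline->last_fence = dma_fence_get(new_fence);
 *	}
 */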
/**
 * dma_fence_get_status_locked - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence (to indicate whether the fence was completed due to an error
 * rather than success). The value of the status condition is only valid
 * if the fence has been signaled, so dma_fence_get_status_locked() first
 * checks the signal state before reporting the error status.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence completed with an error.
 */
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
{
	if (dma_fence_is_signaled_locked(fence))
		return fence->error ?: 1;
	else
		return 0;
}

int dma_fence_get_status(struct dma_fence *fence);

/**
 * dma_fence_set_error - flag an error condition on the fence
 * @fence: the dma_fence
 * @error: the error to store
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence, to indicate that the fence was completed due to an error
 * rather than success. This must be set before signaling (so that the value
 * is visible before any waiters on the signal callback are woken). This
 * helper exists to help catch erroneous setting of &dma_fence.error.
 */
static inline void dma_fence_set_error(struct dma_fence *fence,
				       int error)
{
	WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
	WARN_ON(error >= 0 || error < -MAX_ERRNO);

	fence->error = error;
}

signed long dma_fence_wait_timeout(struct dma_fence *fence,
				   bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
				       uint32_t count,
				       bool intr, signed long timeout,
				       uint32_t *idx);

/**
 * dma_fence_wait - sleep until the fence gets signaled
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 *
 * This function will return -ERESTARTSYS if interrupted by a signal,
 * or 0 if the fence was signaled. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
 */
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
	signed long ret;

	/* Since dma_fence_wait_timeout cannot timeout with
	 * MAX_SCHEDULE_TIMEOUT, only valid return values are
	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
	 */
	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	return ret < 0 ? ret : 0;
}
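/*
 * Illustrative sketch of a bounded, interruptible wait using
 * dma_fence_wait_timeout(); the caller must hold a reference to the fence
 * for the duration of the wait (hypothetical 100ms budget):
 *
 *	long ret;
 *
 *	dma_fence_get(fence);
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	dma_fence_put(fence);
 *
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// wait timed out
 *	if (ret < 0)
 *		return ret;		// e.g. -ERESTARTSYS
 *	// ret > 0: fence signaled, ret is the remaining jiffies
 */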
struct dma_fence *dma_fence_get_stub(void);
u64 dma_fence_context_alloc(unsigned num);

#define DMA_FENCE_TRACE(f, fmt, args...) \
	do {								\
		struct dma_fence *__ff = (f);				\
		if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))			\
			pr_info("f %llu#%llu: " fmt,			\
				__ff->context, __ff->seqno, ##args);	\
	} while (0)

#define DMA_FENCE_WARN(f, fmt, args...) \
	do {								\
		struct dma_fence *__ff = (f);				\
		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
			##args);					\
	} while (0)

#define DMA_FENCE_ERR(f, fmt, args...) \
	do {								\
		struct dma_fence *__ff = (f);				\
		pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno,	\
		       ##args);						\
	} while (0)

#endif /* __LINUX_DMA_FENCE_H */