linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
	bool	explicit;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
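
/*
 * Example (illustrative sketch, not part of the driver): the typical
 * lifecycle of a sync object. Dependencies are collected with
 * amdgpu_sync_fence() and/or amdgpu_sync_resv(), consumed, and the object
 * is freed again. "dep_fence" is a hypothetical dependency fence.
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(&sync, dep_fence, false);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */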

/**
 * amdgpu_sync_same_dev - test if fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether the fence is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
				  bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve the explicit flag to not lose the pipeline sync */
		e->explicit |= explicit;

		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: if this is an explicit dependency
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
		      bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
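
/*
 * Example (illustrative, hypothetical caller): remembering an implicit and an
 * explicit dependency. Fences from an already known context are merged by
 * amdgpu_sync_add_later(), so only the latest fence per context is kept and
 * the explicit flag is preserved.
 *
 *	r = amdgpu_sync_fence(&sync, implicit_fence, false);
 *	if (!r)
 *		r = amdgpu_sync_fence(&sync, explicit_fence, true);
 */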

/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
	if (!fence)
		return 0;

	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
	return amdgpu_sync_fence(sync, fence, false);
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to the fences in the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	struct dma_resv_list *flist;
	struct dma_fence *f;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = dma_resv_get_excl(resv);
	r = amdgpu_sync_fence(sync, f, false);

	flist = dma_resv_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		void *fence_owner;

		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));

		fence_owner = amdgpu_sync_get_owner(f);

		/* Always sync to moves, no matter what */
		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
			r = amdgpu_sync_fence(sync, f, false);
			if (r)
				break;
		}

		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Ignore fences depending on the sync mode */
		switch (mode) {
		case AMDGPU_SYNC_ALWAYS:
			break;

		case AMDGPU_SYNC_NE_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner == owner)
				continue;
			break;

		case AMDGPU_SYNC_EQ_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner != owner)
				continue;
			break;

		case AMDGPU_SYNC_EXPLICIT:
			continue;
		}

		r = amdgpu_sync_fence(sync, f, false);
		if (r)
			break;
	}
	return r;
}
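
/*
 * Example (illustrative sketch): syncing a job to the relevant fences of a
 * buffer object before submission. The buffer object, VM pointer and error
 * label are hypothetical, as is the bo->tbo.base.resv path; the reservation
 * object must be held as required by dma_resv_held().
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
 *			     AMDGPU_SYNC_NE_OWNER, vm);
 *	if (r)
 *		goto error;
 */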

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: set to true if the returned fence is an explicit dependency
 *
 * Gets and removes the next fence from the sync object that is not signaled
 * yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}
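
/*
 * Example (illustrative): draining all unsignaled fences from a sync object,
 * e.g. to forward them as dependencies elsewhere. The caller receives the
 * reference held by the entry and must drop it with dma_fence_put();
 * consume_dependency() is a hypothetical helper.
 *
 *	struct dma_fence *fence;
 *	bool explicit;
 *
 *	while ((fence = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		consume_dependency(fence, explicit);
 *		dma_fence_put(fence);
 *	}
 */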

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f, e->explicit);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}
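
/*
 * Example (illustrative): duplicating the collected dependencies so one copy
 * can be consumed right away while the other is kept for later, e.g. for a
 * second submission. Signaled fences are dropped from the source as a side
 * effect; "source_sync" and the error label are hypothetical.
 *
 *	struct amdgpu_sync copy;
 *
 *	amdgpu_sync_create(&copy);
 *	r = amdgpu_sync_clone(&source_sync, &copy);
 *	if (r)
 *		goto error;
 */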

/**
 * amdgpu_sync_wait - wait for all the fences in the sync object
 *
 * @sync: sync object to use
 * @intr: if true, the wait is interruptible
 *
 * Wait for every fence in the sync object to signal, dropping each one from
 * the object as it completes. Returns 0 on success or a negative error code.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}