linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

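/*
 * One bookkeeping entry per fence context we have to sync to; the entries
 * live in the sync object's hash table and are allocated from the
 * amdgpu_sync_slab cache declared below.
 */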
struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct fence            *fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        hash_init(sync->fences);
        sync->last_vm_update = NULL;
}

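/*
 * Illustrative usage sketch (not a call site in this file; the fence,
 * reservation object and owner names below are placeholders): a sync object
 * is typically stack allocated or embedded in a job, filled with the
 * dependencies of a submission and finally released with amdgpu_sync_free().
 *
 *      struct amdgpu_sync sync;
 *      int r;
 *
 *      amdgpu_sync_create(&sync);
 *      r = amdgpu_sync_fence(adev, &sync, some_fence);
 *      if (!r)
 *              r = amdgpu_sync_resv(adev, &sync, some_bo_resv, owner);
 *      ...
 *      amdgpu_sync_free(&sync);
 */
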
/**
 * amdgpu_sync_same_dev - test if fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->adev == adev;
        }

        return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence)
                return s_fence->owner;

        return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
        if (*keep && fence_is_later(*keep, fence))
                return;

        fence_put(*keep);
        *keep = fence_get(fence);
}

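/*
 * Note on reference counting: amdgpu_sync_keep_later() does not consume a
 * reference from the caller; it drops its own reference on the fence it
 * replaces and takes a fresh one on the fence it decides to keep.
 */
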
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object, remembering at most one fence per
 * fence context.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *f)
{
        struct amdgpu_sync_entry *e;

        if (!f)
                return 0;

        if (amdgpu_sync_same_dev(adev, f) &&
            amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
                amdgpu_sync_keep_later(&sync->last_vm_update, f);

        hash_for_each_possible(sync->fences, e, node, f->context) {
                if (unlikely(e->fence->context != f->context))
                        continue;

                amdgpu_sync_keep_later(&e->fence, f);
                return 0;
        }

        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        hash_add(sync->fences, &e->node, f->context);
        e->fence = fence_get(f);
        return 0;
}

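/*
 * Design note on the function above: entries are hashed by fence context,
 * so the sync object keeps at most one fence per context and
 * amdgpu_sync_keep_later() makes sure it is always the latest one.  A
 * minimal sketch with two hypothetical fences a and b from the same
 * context, b submitted after a:
 *
 *      amdgpu_sync_fence(adev, &sync, a);
 *      amdgpu_sync_fence(adev, &sync, b);      // a is dropped, only b is kept
 */
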
/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner used to decide which shared fences we need to sync to
 *
 * Sync to the exclusive fence and to all shared fences we actually have to
 * wait for, based on @owner.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner)
{
        struct reservation_object_list *flist;
        struct fence *f;
        void *fence_owner;
        unsigned i;
        int r = 0;

        if (resv == NULL)
                return -EINVAL;

        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
        r = amdgpu_sync_fence(adev, sync, f);

        flist = reservation_object_get_list(resv);
        if (!flist || r)
                return r;

        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
                if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
                        fence_owner = amdgpu_sync_get_owner(f);
                        if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
                             (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;

                        /* Ignore fence from the same owner as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
                            fence_owner == owner)
                                continue;
                }

                r = amdgpu_sync_fence(adev, sync, f);
                if (r)
                        break;
        }
        return r;
}

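/**
 * amdgpu_sync_get_fence - get the next unsignaled fence
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object that is not yet
 * signaled; signaled fences encountered on the way are dropped.  The
 * reference to the returned fence is handed over to the caller.
 */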
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {

                f = e->fence;

                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);

                if (!fence_is_signaled(f))
                        return f;

                fence_put(f);
        }
        return NULL;
}

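/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to use
 *
 * Wait for every remembered fence to signal, dropping the entries as they
 * complete.  Returns 0 on success or the error from the first failed wait.
 */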
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                r = fence_wait(e->fence, false);
                if (r)
                        return r;

                hash_del(&e->node);
                fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        return 0;
}

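/*
 * Illustrative consumer sketch (placeholder names, not a call site in this
 * file): the collected dependencies can either be drained one at a time,
 * e.g. to hand them to a scheduler, or waited for synchronously.
 *
 *      struct fence *f;
 *
 *      while ((f = amdgpu_sync_get_fence(&sync))) {
 *              ... schedule against or wait on f ...
 *              fence_put(f);
 *      }
 *
 * or simply:
 *
 *      r = amdgpu_sync_wait(&sync);
 */
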
/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        unsigned i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
        amdgpu_sync_slab = kmem_cache_create(
                "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_sync_slab)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
        kmem_cache_destroy(amdgpu_sync_slab);
}
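
/*
 * amdgpu_sync_init() and amdgpu_sync_fini() are expected to be called once
 * from the driver's module init/exit path (presumably amdgpu_drv.c); that
 * call site lives outside this file.
 */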