linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if such a one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
 */
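
/*
 * Worked example of the algorithm above (illustrative only, not part of
 * the original file): assume a 1024 byte manager from which three 256 byte
 * bos were allocated at offsets 0, 256 and 512, in that order, so "hole"
 * sits after the bo at offset 512.  A new 256 byte request is first tried
 * in the range [768, 1024).  If the remaining space were too small, the
 * allocator would skip to the oldest fenced bo, the one at offset 0, and
 * reuse that range once its fence signals, since the oldest allocation is
 * the first the GPU should be done with.
 */
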
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain)
{
        int i, r;

        init_waitqueue_head(&sa_manager->wq);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
        sa_manager->align = align;
        sa_manager->hole = &sa_manager->olist;
        INIT_LIST_HEAD(&sa_manager->olist);
        for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                INIT_LIST_HEAD(&sa_manager->flist[i]);

        r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
                                &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
        }

        memset(sa_manager->cpu_ptr, 0, sa_manager->size);
        return r;
}

void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return;
        }

        if (!list_empty(&sa_manager->olist)) {
                sa_manager->hole = &sa_manager->olist;
                amdgpu_sa_bo_try_free(sa_manager);
                if (!list_empty(&sa_manager->olist)) {
                        dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
                }
        }
        list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
                amdgpu_sa_bo_remove_locked(sa_bo);
        }

        amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
        sa_manager->size = 0;
}

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
        struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

        /* if the hole points at the bo being removed, move it back
         * to the previous entry so it stays valid
         */
        if (sa_manager->hole == &sa_bo->olist) {
                sa_manager->hole = sa_bo->olist.prev;
        }
        list_del_init(&sa_bo->olist);
        list_del_init(&sa_bo->flist);
        dma_fence_put(sa_bo->fence);
        kfree(sa_bo);
}

static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->hole->next == &sa_manager->olist)
                return;

        /* free all bos after the hole whose fences have signaled,
         * stopping at the first one that is still in use
         */
        sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
        list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                if (sa_bo->fence == NULL ||
                    !dma_fence_is_signaled(sa_bo->fence)) {
                        return;
                }
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
}

static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole != &sa_manager->olist) {
                return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
        }
        return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole->next != &sa_manager->olist) {
                return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
        }
        return sa_manager->size;
}

static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
                                   struct amdgpu_sa_bo *sa_bo,
                                   unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        /* bytes lost to rounding soffset up to the requested alignment */
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                soffset += wasted;

                sa_bo->manager = sa_manager;
                sa_bo->soffset = soffset;
                sa_bo->eoffset = soffset + size;
                list_add(&sa_bo->olist, sa_manager->hole);
                INIT_LIST_HEAD(&sa_bo->flist);
                sa_manager->hole = &sa_bo->olist;
                return true;
        }
        return false;
}
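
/*
 * Illustrative arithmetic for the "wasted" computation above (not part
 * of the original file): with soffset = 100 and align = 64,
 * wasted = (64 - (100 % 64)) % 64 = (64 - 36) % 64 = 28, so the
 * allocation really starts at offset 128.  When soffset is already
 * aligned, the outer "% align" turns the result into 0 instead of align.
 */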

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
                            unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;
        int i;

        for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                if (!list_empty(&sa_manager->flist[i]))
                        return true;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                return true;
        }

        return false;
}

static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                                   struct dma_fence **fences,
                                   unsigned *tries)
{
        struct amdgpu_sa_bo *best_bo = NULL;
        unsigned i, soffset, best, tmp;

        /* if hole points to the end of the buffer */
        if (sa_manager->hole->next == &sa_manager->olist) {
                /* try again with its beginning */
                sa_manager->hole = &sa_manager->olist;
                return true;
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        /* to handle wrap around we add sa_manager->size */
        best = sa_manager->size * 2;
        /* go over all fence lists and try to find the sa_bo
         * closest to the current hole
         */
        for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
                struct amdgpu_sa_bo *sa_bo;

                if (list_empty(&sa_manager->flist[i]))
                        continue;

                sa_bo = list_first_entry(&sa_manager->flist[i],
                                         struct amdgpu_sa_bo, flist);

                if (!dma_fence_is_signaled(sa_bo->fence)) {
                        fences[i] = sa_bo->fence;
                        continue;
                }

                /* limit the number of tries each ring gets */
                if (tries[i] > 2) {
                        continue;
                }

                tmp = sa_bo->soffset;
                if (tmp < soffset) {
                        /* wrap around, pretend it's after */
                        tmp += sa_manager->size;
                }
                tmp -= soffset;
                if (tmp < best) {
                        /* this sa bo is the closest one */
                        best = tmp;
                        best_bo = sa_bo;
                }
        }

        if (best_bo) {
                uint32_t idx = best_bo->fence->context;

                idx %= AMDGPU_SA_NUM_FENCE_LISTS;
                ++tries[idx];
                sa_manager->hole = best_bo->olist.prev;

                /* we knew that this one is signaled,
                 * so it's safe to remove it
                 */
                amdgpu_sa_bo_remove_locked(best_bo);
                return true;
        }
        return false;
}
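
/*
 * Illustrative wrap-around arithmetic for the loop above (not part of
 * the original file): with size = 1024 and the current hole ending at
 * soffset = 900, a signaled sa_bo at offset 100 lies before the hole,
 * so its distance is computed as 100 + 1024 - 900 = 224, while one at
 * offset 950 has distance 50 and wins as the closest candidate.
 */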

int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
{
        struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
        unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
        unsigned count;
        int i, r;
        signed long t;

        if (WARN_ON_ONCE(align > sa_manager->align))
                return -EINVAL;

        if (WARN_ON_ONCE(size > sa_manager->size))
                return -EINVAL;

        *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
        if (!(*sa_bo))
                return -ENOMEM;
        (*sa_bo)->manager = sa_manager;
        (*sa_bo)->fence = NULL;
        INIT_LIST_HEAD(&(*sa_bo)->olist);
        INIT_LIST_HEAD(&(*sa_bo)->flist);

        spin_lock(&sa_manager->wq.lock);
        do {
                for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
                        fences[i] = NULL;
                        tries[i] = 0;
                }

                do {
                        amdgpu_sa_bo_try_free(sa_manager);

                        if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                   size, align)) {
                                spin_unlock(&sa_manager->wq.lock);
                                return 0;
                        }

                        /* see if we can skip over some allocations */
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

                /* collect references to the still unsignaled fences,
                 * compacting them to the front of the array
                 */
                for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                        if (fences[i])
                                fences[count++] = dma_fence_get(fences[i]);

                if (count) {
                        spin_unlock(&sa_manager->wq.lock);
                        t = dma_fence_wait_any_timeout(fences, count, false,
                                                       MAX_SCHEDULE_TIMEOUT,
                                                       NULL);
                        for (i = 0; i < count; ++i)
                                dma_fence_put(fences[i]);

                        r = (t > 0) ? 0 : t;
                        spin_lock(&sa_manager->wq.lock);
                } else {
                        /* if we have nothing to wait for, block */
                        r = wait_event_interruptible_locked(
                                sa_manager->wq,
                                amdgpu_sa_event(sa_manager, size, align)
                        );
                }

        } while (!r);

        spin_unlock(&sa_manager->wq.lock);
        kfree(*sa_bo);
        *sa_bo = NULL;
        return r;
}
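
/*
 * Illustrative caller pattern (not part of the original file; mgr, size,
 * align and fence are hypothetical variables):
 *
 *      struct amdgpu_sa_bo *sa_bo;
 *      int r = amdgpu_sa_bo_new(mgr, &sa_bo, size, align);
 *
 *      if (r)
 *              return r;
 *      ... submit GPU work that reads from the sub-allocation ...
 *      amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *
 * Passing the fence of the submitted work keeps the range reserved until
 * the GPU is done with it; passing NULL (or an already signaled fence)
 * returns the range to the manager immediately.
 */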

void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
                       struct dma_fence *fence)
{
        struct amdgpu_sa_manager *sa_manager;

        if (sa_bo == NULL || *sa_bo == NULL) {
                return;
        }

        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->wq.lock);
        if (fence && !dma_fence_is_signaled(fence)) {
                uint32_t idx;

                /* still busy: park the bo on the fence list of its context */
                (*sa_bo)->fence = dma_fence_get(fence);
                idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
                list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
        } else {
                amdgpu_sa_bo_remove_locked(*sa_bo);
        }
        wake_up_all_locked(&sa_manager->wq);
        spin_unlock(&sa_manager->wq.lock);
        *sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m)
{
        struct amdgpu_sa_bo *i;

        spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
                uint64_t soffset = i->soffset + sa_manager->gpu_addr;
                uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

                if (&i->olist == sa_manager->hole) {
                        seq_printf(m, ">");
                } else {
                        seq_printf(m, " ");
                }
                seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
                           soffset, eoffset, eoffset - soffset);

                if (i->fence)
                        seq_printf(m, " protected by 0x%08x on context %llu",
                                   i->fence->seqno, i->fence->context);

                seq_printf(m, "\n");
        }
        spin_unlock(&sa_manager->wq.lock);
}
#endif