linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case we skip over the bo after last to the closest
 * done bo if one exists. If none exists and we are not asked to block
 * we report failure to allocate.
 *
 * If we are asked to block we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
 */
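/* A minimal usage sketch (illustrative only, error handling omitted;
 * "bytes" and "fence" stand in for caller-provided values):
 *
 *   amdgpu_sa_bo_manager_init(adev, &mgr, size, align, domain);
 *   amdgpu_sa_bo_manager_start(adev, &mgr);
 *
 *   struct amdgpu_sa_bo *sa_bo;
 *   amdgpu_sa_bo_new(&mgr, &sa_bo, bytes, align);
 *   ... fill the memory, submit work, obtain a fence ...
 *   amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *
 *   amdgpu_sa_bo_manager_suspend(adev, &mgr);
 *   amdgpu_sa_bo_manager_fini(adev, &mgr);
 */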
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

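/**
 * amdgpu_sa_bo_manager_init - allocate and initialize a sub-allocator
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer object in bytes
 * @align: alignment for the backing buffer object
 * @domain: memory domain to place the backing buffer object in
 *
 * Sets up the hole pointer and the per ring fence lists and creates the
 * backing buffer object. Returns 0 on success, negative error code on
 * failure.
 */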
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain)
{
        int i, r;

        init_waitqueue_head(&sa_manager->wq);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
        sa_manager->align = align;
        sa_manager->hole = &sa_manager->olist;
        INIT_LIST_HEAD(&sa_manager->olist);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                INIT_LIST_HEAD(&sa_manager->flist[i]);
        }

        r = amdgpu_bo_create(adev, size, align, true, domain,
                             0, NULL, NULL, &sa_manager->bo);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
        }

        return r;
}

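/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocator
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to tear down
 *
 * Frees all remaining sub-allocations, warning if any are still busy,
 * and drops the reference on the backing buffer object.
 */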
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (!list_empty(&sa_manager->olist)) {
                sa_manager->hole = &sa_manager->olist;
                amdgpu_sa_bo_try_free(sa_manager);
                if (!list_empty(&sa_manager->olist)) {
                        dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
                }
        }
        list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
        amdgpu_bo_unref(&sa_manager->bo);
        sa_manager->size = 0;
}

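/**
 * amdgpu_sa_bo_manager_start - pin and map the backing buffer
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to start
 *
 * Pins the backing buffer object into its domain to obtain a GPU
 * address and kmaps it for CPU access. Returns 0 on success, negative
 * error code on failure.
 */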
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        /* map the buffer */
        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
        if (r) {
                amdgpu_bo_unreserve(sa_manager->bo);
                dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
        amdgpu_bo_unreserve(sa_manager->bo);
        return r;
}

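/**
 * amdgpu_sa_bo_manager_suspend - unmap and unpin the backing buffer
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_manager: manager to suspend
 *
 * Counterpart of amdgpu_sa_bo_manager_start(), called before the
 * backing buffer object needs to be evicted, e.g. on suspend.
 */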
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
                                 struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }

        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (!r) {
                amdgpu_bo_kunmap(sa_manager->bo);
                amdgpu_bo_unpin(sa_manager->bo);
                amdgpu_bo_unreserve(sa_manager->bo);
        }
        return r;
}

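/* Unlink a sub-allocation from the offset and fence lists and free it.
 * If the hole currently points at this bo, move the hole one entry back
 * so it stays valid. Expects the caller to hold sa_manager->wq.lock or
 * to otherwise have exclusive access, as during teardown.
 */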
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
        struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
        if (sa_manager->hole == &sa_bo->olist) {
                sa_manager->hole = sa_bo->olist.prev;
        }
        list_del_init(&sa_bo->olist);
        list_del_init(&sa_bo->flist);
        fence_put(sa_bo->fence);
        kfree(sa_bo);
}

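/* Walk from the hole towards the end of the buffer and free every
 * sub-allocation whose fence has already signaled, stopping at the
 * first one that has no fence or whose fence hasn't signaled yet.
 */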
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->hole->next == &sa_manager->olist)
                return;

        sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
        list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                if (sa_bo->fence == NULL ||
                    !fence_is_signaled(sa_bo->fence)) {
                        return;
                }
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
}

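/* Start and end offset of the current hole: the hole starts where the
 * bo the hole pointer rests on ends (or at 0 when it rests on the list
 * head) and ends where the next bo in the offset list starts (or at
 * the end of the buffer).
 */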
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole != &sa_manager->olist) {
                return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
        }
        return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole->next != &sa_manager->olist) {
                return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
        }
        return sa_manager->size;
}

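/* Try to place @size bytes with alignment @align into the current hole.
 * The space lost to rounding the start offset up to the alignment is
 * accounted as "wasted"; e.g. a hole starting at offset 3 with align 4
 * wastes one byte. On success links @sa_bo in after the hole, moves the
 * hole pointer onto it and returns true.
 */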
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
                                   struct amdgpu_sa_bo *sa_bo,
                                   unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                soffset += wasted;

                sa_bo->manager = sa_manager;
                sa_bo->soffset = soffset;
                sa_bo->eoffset = soffset + size;
                list_add(&sa_bo->olist, sa_manager->hole);
                INIT_LIST_HEAD(&sa_bo->flist);
                sa_manager->hole = &sa_bo->olist;
                return true;
        }
        return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
                            unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (!list_empty(&sa_manager->flist[i])) {
                        return true;
                }
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                return true;
        }

        return false;
}

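/* Try to advance the hole: either wrap around to the beginning of the
 * buffer, or free the signaled sub-allocation that is closest after the
 * current hole (accounting for wrap around). Fences of still busy
 * allocations are collected in @fences so the caller can wait on them;
 * @tries limits how often each ring may be skipped over. Returns true
 * if the hole moved and the caller should retry the allocation.
 */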
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                                   struct fence **fences,
                                   unsigned *tries)
{
        struct amdgpu_sa_bo *best_bo = NULL;
        unsigned i, soffset, best, tmp;

        /* if hole points to the end of the buffer */
        if (sa_manager->hole->next == &sa_manager->olist) {
                /* try again with its beginning */
                sa_manager->hole = &sa_manager->olist;
                return true;
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        /* to handle wrap around we add sa_manager->size */
        best = sa_manager->size * 2;
        /* go over all the fence lists and try to find the closest sa_bo
         * after the current last
         */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_sa_bo *sa_bo;

                if (list_empty(&sa_manager->flist[i])) {
                        continue;
                }

                sa_bo = list_first_entry(&sa_manager->flist[i],
                                         struct amdgpu_sa_bo, flist);

                if (!fence_is_signaled(sa_bo->fence)) {
                        fences[i] = sa_bo->fence;
                        continue;
                }

                /* limit the number of tries each ring gets */
                if (tries[i] > 2) {
                        continue;
                }

                tmp = sa_bo->soffset;
                if (tmp < soffset) {
                        /* wrap around, pretend it's after */
                        tmp += sa_manager->size;
                }
                tmp -= soffset;
                if (tmp < best) {
                        /* this sa bo is the closest one */
                        best = tmp;
                        best_bo = sa_bo;
                }
        }

        if (best_bo) {
                uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
                ++tries[idx];
                sa_manager->hole = best_bo->olist.prev;

                /* we knew that this one is signaled,
                   so it's safe to remove it */
                amdgpu_sa_bo_remove_locked(best_bo);
                return true;
        }
        return false;
}

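/**
 * amdgpu_sa_bo_new - allocate memory from the sub-allocator
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: resulting sub-allocation
 * @size: number of bytes to allocate
 * @align: alignment the allocation needs to match
 *
 * Tries to allocate directly, otherwise skips over signaled allocations
 * and finally blocks on the collected fences until space becomes
 * available. Returns 0 on success, negative error code on failure.
 */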
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
{
        struct fence *fences[AMDGPU_MAX_RINGS];
        unsigned tries[AMDGPU_MAX_RINGS];
        unsigned count;
        int i, r;
        signed long t;

        BUG_ON(align > sa_manager->align);
        BUG_ON(size > sa_manager->size);

        *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
        if ((*sa_bo) == NULL) {
                return -ENOMEM;
        }
        (*sa_bo)->manager = sa_manager;
        (*sa_bo)->fence = NULL;
        INIT_LIST_HEAD(&(*sa_bo)->olist);
        INIT_LIST_HEAD(&(*sa_bo)->flist);

        spin_lock(&sa_manager->wq.lock);
        do {
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        fences[i] = NULL;
                        tries[i] = 0;
                }

                do {
                        amdgpu_sa_bo_try_free(sa_manager);

                        if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                   size, align)) {
                                spin_unlock(&sa_manager->wq.lock);
                                return 0;
                        }

                        /* see if we can skip over some allocations */
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

                for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
                        if (fences[i])
                                fences[count++] = fences[i];

                if (count) {
                        spin_unlock(&sa_manager->wq.lock);
                        t = fence_wait_any_timeout(fences, count, false,
                                                   MAX_SCHEDULE_TIMEOUT);
                        r = (t > 0) ? 0 : t;
                        spin_lock(&sa_manager->wq.lock);
                } else {
                        /* if we have nothing to wait for, block */
                        r = wait_event_interruptible_locked(
                                sa_manager->wq,
                                amdgpu_sa_event(sa_manager, size, align)
                        );
                }

        } while (!r);

        spin_unlock(&sa_manager->wq.lock);
        kfree(*sa_bo);
        *sa_bo = NULL;
        return r;
}

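/**
 * amdgpu_sa_bo_free - free a sub-allocation
 *
 * @adev: amdgpu device the manager belongs to
 * @sa_bo: sub-allocation to free
 * @fence: fence of the last command stream using the allocation, or NULL
 *
 * If @fence hasn't signaled yet the allocation is queued on the fence
 * list of its ring and reclaimed later, otherwise it is removed
 * immediately. Wakes up potential waiters in amdgpu_sa_bo_new().
 */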
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
                       struct fence *fence)
{
        struct amdgpu_sa_manager *sa_manager;

        if (sa_bo == NULL || *sa_bo == NULL) {
                return;
        }

        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->wq.lock);
        if (fence && !fence_is_signaled(fence)) {
                uint32_t idx;
                (*sa_bo)->fence = fence_get(fence);
                idx = amdgpu_ring_from_fence(fence)->idx;
                list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
        } else {
                amdgpu_sa_bo_remove_locked(*sa_bo);
        }
        wake_up_all_locked(&sa_manager->wq);
        spin_unlock(&sa_manager->wq.lock);
        *sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

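/* Print which fence protects a sub-allocation, handling both raw
 * amdgpu fences and scheduler fences.
 */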
static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
{
        struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);

        if (a_fence)
                seq_printf(m, " protected by 0x%016llx on ring %d",
                           a_fence->seq, a_fence->ring->idx);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                seq_printf(m, " protected by 0x%016x on ring %d",
                           s_fence->base.seqno, ring->idx);
        }
}

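/* Dump all sub-allocations to debugfs, marking the current hole
 * position with '>'.
 */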
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m)
{
        struct amdgpu_sa_bo *i;

        spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
                uint64_t soffset = i->soffset + sa_manager->gpu_addr;
                uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
                if (&i->olist == sa_manager->hole) {
                        seq_printf(m, ">");
                } else {
                        seq_printf(m, " ");
                }
                seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
                           soffset, eoffset, eoffset - soffset);
                if (i->fence)
                        amdgpu_sa_bo_dump_fence(i->fence, m);
                seq_printf(m, "\n");
        }
        spin_unlock(&sa_manager->wq.lock);
}
#endif