linux/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

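/*
 * Timeout handler called by the DRM scheduler when a job runs too long.
 * Try a per-ring soft recovery first; if that is not possible, log the
 * hang with the ring's fence state and the offending process, then either
 * trigger a full GPU reset or suspend the scheduler timeout (and enable
 * TDR debugging on SR-IOV VFs).
 */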
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_task_info ti;
        struct amdgpu_device *adev = ring->adev;

        memset(&ti, 0, sizeof(struct amdgpu_task_info));

        if (amdgpu_gpu_recovery &&
            amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                return;
        }

        amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
        DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);

        if (amdgpu_device_should_recover_gpu(ring->adev)) {
                amdgpu_device_gpu_recover(ring->adev, job);
        } else {
                drm_sched_suspend_timeout(&ring->sched);
                if (amdgpu_sriov_vf(adev))
                        adev->virt.tdr_debug = true;
        }
}

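/*
 * Allocate a job together with space for @num_ibs IBs in a single
 * allocation, create its sync objects and snapshot the current
 * vram_lost_counter.
 */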
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        /*
         * Initialize the scheduler to at least some ring so that we always
         * have a pointer to adev.
         */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

        return 0;
}

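/*
 * Convenience wrapper: allocate a VM-less job with a single IB of @size
 * bytes from the given IB pool.  The job is freed again if getting the
 * IB fails.
 */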
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                enum amdgpu_ib_pool_type pool_type,
                struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
        if (r)
                kfree(*job);

        return r;
}

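/*
 * Release the IBs of a job.  The scheduler's finished fence (or the HW
 * fence as fallback) is attached so the IB memory is only reused once the
 * job has actually completed.
 */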
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

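/*
 * free_job callback of the scheduler: clean up the scheduler job, drop the
 * HW fence and the sync objects and free jobs that went through the
 * scheduler.
 */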
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        drm_sched_job_cleanup(s_job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

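/* Free a job that was never pushed to the scheduler (direct submission and error paths). */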
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

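/*
 * Hand the job over to the DRM scheduler: initialize the scheduler job for
 * @entity, return the finished fence through @f and push the job.  The IB
 * resources are released here already; the attached finished fence makes
 * sure they are only reused once the job has completed.
 */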
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
{
        int r;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        drm_sched_entity_push_job(&job->base, entity);

        return 0;
}

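/*
 * Submit the job directly to the ring, bypassing the scheduler.  @fence
 * receives the HW fence of the submission and the job is freed on success.
 */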
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                             struct dma_fence **fence)
{
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
        job->fence = dma_fence_get(*fence);
        if (r)
                return r;

        amdgpu_job_free(job);
        return 0;
}

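/*
 * dependency callback of the scheduler: return the next fence the job has
 * to wait for.  Once the explicit dependencies are satisfied, a VMID is
 * grabbed and the fences added by the VMID management are returned as
 * well.
 */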
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync);
        if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
                r = amdgpu_sync_fence(&job->sched_sync, fence);
                if (r)
                        DRM_ERROR("Error adding fence (%d)\n", r);
        }

        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync);
        }

        return fence;
}

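/*
 * run_job callback of the scheduler: write the job's IBs to the ring and
 * return the HW fence.  If VRAM was lost since the job was created, the
 * finished fence is marked with -ECANCELED and the IBs are skipped.
 */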
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r = 0;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);

        fence = r ? ERR_PTR(r) : fence;
        return fence;
}

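/* Recover the drm_sched_job from the queue node returned by spsc_queue_pop(). */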
#define to_drm_sched_job(sched_job)             \
                container_of((sched_job), struct drm_sched_job, queue_node)

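/*
 * Kill all jobs on @sched: signal the jobs still sitting in the entity
 * queues as well as the jobs already pushed to the hardware, and mark
 * their finished fences with -EHWPOISON so that waiters see the error.
 */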
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job;
        struct drm_sched_entity *s_entity = NULL;
        int i;

        /* Signal all jobs not yet scheduled */
        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                struct drm_sched_rq *rq = &sched->sched_rq[i];

                if (!rq)
                        continue;

                spin_lock(&rq->lock);
                list_for_each_entry(s_entity, &rq->entities, list) {
                        while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
                                struct drm_sched_fence *s_fence = s_job->s_fence;

                                dma_fence_signal(&s_fence->scheduled);
                                dma_fence_set_error(&s_fence->finished, -EHWPOISON);
                                dma_fence_signal(&s_fence->finished);
                        }
                }
                spin_unlock(&rq->lock);
        }

        /* Signal all jobs already scheduled to HW */
        list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;

                dma_fence_set_error(&s_fence->finished, -EHWPOISON);
                dma_fence_signal(&s_fence->finished);
        }
}

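/* Backend operations amdgpu registers with the DRM GPU scheduler. */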
const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};