linux/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

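/**
 * amdgpu_job_alloc - allocate a job with space for @num_ibs IBs
 * @adev: amdgpu device the job belongs to
 * @num_ibs: number of IBs to reserve directly behind the job structure
 * @job: resulting job, owned by the caller on success
 *
 * The IB array is placed in the same allocation as the job itself and the
 * job's sync object is initialized.  Returns 0 on success or a negative
 * error code on failure.
 */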
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	/* the IB array lives directly behind the job structure */
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);

	return 0;
}

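/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device the job belongs to
 * @size: size of the IB to allocate
 * @job: resulting job, owned by the caller on success
 *
 * Convenience wrapper around amdgpu_job_alloc() which also allocates the
 * single IB.  On failure the job is freed again and the error code is
 * returned.
 */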
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

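/**
 * amdgpu_job_free - free a job and everything it references
 * @job: job to free
 *
 * Releases the IB suballocations (kept alive until the scheduler fence or,
 * failing that, the hardware fence signals), the hardware fence, the user
 * fence BO and the sync object before freeing the job itself.
 */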
void amdgpu_job_free(struct amdgpu_job *job)
{
	unsigned i;
	struct fence *f;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->base : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
	fence_put(job->fence);

	amdgpu_bo_unref(&job->uf.bo);
	amdgpu_sync_free(&job->sync);
	kfree(job);
}

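/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit
 * @ring: ring the job is meant for
 * @entity: scheduler entity the job is pushed to
 * @owner: owner used for dependency handling
 * @f: resulting scheduler fence, referenced for the caller
 *
 * Creates the scheduler fence for the job and hands the job over to the
 * scheduler; the job is freed again from the scheduler callbacks once it
 * has run.  Returns 0 on success or -ENOMEM if the fence cannot be created.
 */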
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f)
{
	job->ring = ring;
	job->base.sched = &ring->sched;
	job->base.s_entity = entity;
	job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
	if (!job->base.s_fence)
		return -ENOMEM;

	*f = fence_get(&job->base.s_fence->base);

	job->owner = owner;
	amd_sched_entity_push_job(&job->base);

	return 0;
}

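/**
 * amdgpu_job_dependency - scheduler callback returning the next dependency
 * @sched_job: scheduler job to query
 *
 * Returns the next fence the job still needs to wait for before it can run.
 * If no dependency is left and the job still needs a VM ID, one is grabbed
 * here and any fences added by that become new dependencies.
 */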
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->ibs->vm;

	struct fence *fence = amdgpu_sync_get_fence(&job->sync);

	if (fence == NULL && vm && !job->ibs->vm_id) {
		struct amdgpu_ring *ring = job->ring;
		unsigned i, vm_id;
		uint64_t vm_pd_addr;
		int r;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->base,
				      &vm_id, &vm_pd_addr);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
		else {
			for (i = 0; i < job->num_ibs; ++i) {
				job->ibs[i].vm_id = vm_id;
				job->ibs[i].vm_pd_addr = vm_pd_addr;
			}
		}

		/* grabbing the VM ID may have added new dependencies */
		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

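/**
 * amdgpu_job_run - scheduler callback to actually execute a job
 * @sched_job: job to run
 *
 * Waits for all remaining dependencies, schedules the IBs on the ring and
 * frees the job again.  Returns the hardware fence of the submission, or
 * NULL on error.
 */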
static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);

	r = amdgpu_sync_wait(&job->sync);
	if (r) {
		DRM_ERROR("failed to sync wait (%d)\n", r);
		return NULL;
	}

	trace_amdgpu_sched_run_job(job);
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
			       job->sync.last_vm_update, &fence);
	if (r) {
		DRM_ERROR("Error scheduling IBs (%d)\n", r);
		goto err;
	}

err:
	/* the job is freed in both the success and the error case */
	job->fence = fence;
	amdgpu_job_free(job);
	return fence;
}

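/* scheduler backend callbacks used by the amdgpu ring schedulers */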
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
};