linux/drivers/gpu/drm/etnaviv/etnaviv_sched.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

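/*
 * Module parameters: job_hang_limit is handed to drm_sched_init() as the
 * number of times a job may trigger a timeout before it is dropped, and
 * hw_job_limit bounds how many jobs may be queued to the hardware at once.
 */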
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

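/*
 * Scheduler dependency callback: hand the next unsignaled fence this job
 * still depends on (the submit's in-fence, then each BO's exclusive and
 * shared fences) back to the scheduler, dropping already signaled fences
 * along the way. Returning NULL tells the scheduler all dependencies have
 * been met and the job may run.
 */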
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
                         struct drm_sched_entity *entity)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence;
        int i;

        if (unlikely(submit->in_fence)) {
                fence = submit->in_fence;
                submit->in_fence = NULL;

                if (!dma_fence_is_signaled(fence))
                        return fence;

                dma_fence_put(fence);
        }

        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
                int j;

                if (bo->excl) {
                        fence = bo->excl;
                        bo->excl = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }

                for (j = 0; j < bo->nr_shared; j++) {
                        if (!bo->shared[j])
                                continue;

                        fence = bo->shared[j];
                        bo->shared[j] = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }
                kfree(bo->shared);
                bo->nr_shared = 0;
                bo->shared = NULL;
        }

        return NULL;
}

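/*
 * Called by the scheduler once all dependencies have signaled: push the job
 * to the hardware, unless it has been flagged as bad (finished fence error
 * set) during a previous recovery, in which case it is skipped.
 */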
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence = NULL;

        if (likely(!sched_job->s_fence->finished.error))
                fence = etnaviv_gpu_submit(submit);
        else
                dev_dbg(submit->gpu->dev, "skipping bad job\n");

        return fence;
}

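/*
 * Timeout handling: stop the scheduler, check whether the timeout is
 * spurious (the job already finished, or the front-end is still advancing),
 * and if not dump state and reset the GPU before resubmitting the remaining
 * jobs and restarting the scheduler.
 */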
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
                                                          *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;
        u32 dma_addr;
        int change;

        /* block scheduler */
        drm_sched_stop(&gpu->sched, sched_job);

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(submit->out_fence))
                goto out_no_timeout;

        /*
         * If the GPU is still making forward progress on the front-end (which
         * should never loop) we extend the timeout to give it a chance to
         * finish the job.
         */
        dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
        change = dma_addr - gpu->hangcheck_dma_addr;
        if (change < 0 || change > 16) {
                gpu->hangcheck_dma_addr = dma_addr;
                goto out_no_timeout;
        }

        if (sched_job)
                drm_sched_increase_karma(sched_job);

        /* get the GPU back into the init state */
        etnaviv_core_dump(submit);
        etnaviv_gpu_recover_hang(gpu);

        drm_sched_resubmit_jobs(&gpu->sched);

        drm_sched_start(&gpu->sched, true);
        return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
        /* restart scheduler after GPU is usable again */
        drm_sched_start(&gpu->sched, true);
        return DRM_GPU_SCHED_STAT_NOMINAL;
}

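/*
 * Drop the scheduler's reference on the submit once the job is done with.
 */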
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

        drm_sched_job_cleanup(sched_job);

        etnaviv_submit_put(submit);
}

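/* Backend hooks called by the DRM GPU scheduler core. */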
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
        .dependency = etnaviv_sched_dependency,
        .run_job = etnaviv_sched_run_job,
        .timedout_job = etnaviv_sched_timedout_job,
        .free_job = etnaviv_sched_free_job,
};

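/*
 * Queue a submit to the given scheduler entity. On success the scheduler
 * holds its own reference on the submit, and out_fence/out_fence_id identify
 * the job's finished fence towards userspace.
 */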
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
                           struct etnaviv_gem_submit *submit)
{
        int ret = 0;

        /*
         * Hold the fence lock across the whole operation to avoid jobs being
         * pushed out of order with regard to their sched fence seqnos as
         * allocated in drm_sched_job_init.
         */
        mutex_lock(&submit->gpu->fence_lock);

        ret = drm_sched_job_init(&submit->sched_job, sched_entity,
                                 submit->ctx);
        if (ret)
                goto out_unlock;

        submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
        submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
                                                submit->out_fence, 0,
                                                INT_MAX, GFP_KERNEL);
        if (submit->out_fence_id < 0) {
                drm_sched_job_cleanup(&submit->sched_job);
                ret = -ENOMEM;
                goto out_unlock;
        }

        /* the scheduler holds on to the job now */
        kref_get(&submit->refcount);

        drm_sched_entity_push_job(&submit->sched_job, sched_entity);

out_unlock:
        mutex_unlock(&submit->gpu->fence_lock);

        return ret;
}

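/*
 * One scheduler instance per GPU core, with a fixed 500ms job timeout. The
 * hardware job and hang limits are tunable through the module parameters
 * above.
 */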
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
        int ret;

        ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
                             etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
                             msecs_to_jiffies(500), NULL, NULL,
                             dev_name(gpu->dev));
        if (ret)
                return ret;

        return 0;
}

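/* Tear the scheduler down again when the GPU core goes away. */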
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
        drm_sched_fini(&gpu->sched);
}